Posted to commits@hbase.apache.org by op...@apache.org on 2019/03/11 11:45:46 UTC

[hbase] branch HBASE-21879 updated (582890c -> 6d95335)

This is an automated email from the ASF dual-hosted git repository.

openinx pushed a change to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git.


    from 582890c  HBASE-21917 Make the HFileBlock#validateChecksum can accept ByteBuff as an input. (addendum)
     new 6e61472  HBASE-21949 Fix flaky test TestHBaseTestingUtility.testMiniZooKeeperWithMultipleClientPorts
     new 1d5ae6f  HBASE-21871 Added support to specify a peer table name in VerifyReplication tool
     new c989790  HBASE-20754 [documentation] quickstart guide should instruct folks to set JAVA_HOME to a JDK installation.
     new 9ffb9c5  HBASE-21874 Bucket cache on Persistent memory
     new f935a75  SE-22006 Fix branch-2.1 findbugs warning; causes nightly show as failed.
     new a3c36f8  HBASE-21999 [DEBUG] Exit if git returns empty revision!
     new 06ca2ce  HBASE-22000 Deprecated isTableAvailable with splitKeys
     new 1b1352a  Revert "SE-22006 Fix branch-2.1 findbugs warning; causes nightly show as failed."
     new c19280b  Revert "HBASE-21874 Bucket cache on Persistent memory"
     new 9dc522b  HBASE-21874 Bucket cache on Persistent memory
     new 8b73fae  HBASE-22006 Fix branch-2.1 findbugs warning; causes nightly show as failed.
     new 460b508  HBASE-20918 Re-enable TestRpcHandlerException
     new 3bc1e79  HBASE-21999 ADDENDUM unknown revisions are okay; make sure we indicate them.
     new 9fd2639  HBASE-22007 Add restoreSnapshot and cloneSnapshot with acl methods in AsyncAdmin
     new aa4d3b7  HBASE-22010 Upgrade to 2.2 section header can't use spaces.
     new 31902a0  HBASE-21416 - fix TestRegionInfoDisplay flaky test
     new b1545c3  HBASE-21990 puppycrawl checkstyle dtds 404... moved to sourceforge ADDENDUM -- dtds moved location. See https://github.com/checkstyle/checkstyle/issues/6478
     new a6b94d8  HBASE-21135 Build fails on windows as it fails to parse windows path during license check
     new 0850b4b  HBASE-21987 Simplify RSGroupInfoManagerImpl#flushConfig() for offline mode
     new f1e3e60  HBASE-21959 - CompactionTool should close the store it uses for compacting files, in order to properly archive compacted files.
     new 07f9bef  HBASE-21736 Remove the server from online servers before scheduling SCP for it in hbck
     new 18772ff  HBASE-22001 Polish the Admin interface
     new 6cb2dbe  HBASE-22011 ThriftUtilities.getFromThrift should set filter when not set columns
     new 6d95335  HBASE-22016 Rewrite the block reading methods by using hbase.nio.ByteBuff

The 24 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../resources/hbase/checkstyle-suppressions.xml    |   7 -
 .../src/main/resources/hbase/checkstyle.xml        |   7 +-
 .../java/org/apache/hadoop/hbase/client/Admin.java | 480 +++++++++++++--------
 .../org/apache/hadoop/hbase/client/AsyncAdmin.java |  34 +-
 .../hadoop/hbase/client/AsyncHBaseAdmin.java       |  10 +-
 .../org/apache/hadoop/hbase/client/HBaseAdmin.java | 306 ++-----------
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java    |  86 ++--
 .../hadoop/hbase/client/TestInterfaceAlign.java    |  11 +-
 .../hadoop/hbase/client/TestRegionInfoDisplay.java |   5 +-
 hbase-common/pom.xml                               |   4 +-
 .../org/apache/hadoop/hbase/util/FutureUtils.java  |  21 +
 hbase-common/src/main/resources/hbase-default.xml  |   6 +-
 hbase-common/src/saveVersion.sh                    |  18 +-
 .../mapreduce/replication/VerifyReplication.java   |  28 +-
 .../hadoop/hbase/regionserver/CompactionTool.java  |   2 +
 .../hbase/regionserver/TestCompactionTool.java     | 106 +++++
 .../hbase/replication/TestVerifyReplication.java   | 136 ++++++
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java      |  29 +-
 .../apache/hadoop/hbase/io/hfile/BlockIOUtils.java | 223 ++++++++++
 .../apache/hadoop/hbase/io/hfile/HFileBlock.java   | 337 ++++++---------
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  |  10 +-
 .../hfile/bucket/ExclusiveMemoryMmapIOEngine.java  |  50 +++
 .../{FileMmapEngine.java => FileMmapIOEngine.java} |  73 ++--
 .../io/hfile/bucket/SharedMemoryMmapIOEngine.java  |  64 +++
 .../hadoop/hbase/master/MasterRpcServices.java     |   3 +-
 .../hadoop/hbase/regionserver/HRegionServer.java   |   5 +-
 .../hadoop/hbase/TestHBaseTestingUtility.java      |  16 +-
 ...otWithAcl.java => SnapshotWithAclTestBase.java} | 101 ++---
 .../org/apache/hadoop/hbase/client/TestHbck.java   |  84 ++--
 .../hadoop/hbase/client/TestSnapshotWithAcl.java   | 221 +---------
 ...All.java => TestSnapshotWithAclAsyncAdmin.java} |  50 +--
 ...ckPositionalRead.java => TestBlockIOUtils.java} | 122 +++++-
 .../apache/hadoop/hbase/io/hfile/TestChecksum.java |  14 +-
 ...ine.java => TestExclusiveMemoryMmapEngine.java} |   8 +-
 .../hadoop/hbase/ipc/TestRpcHandlerException.java  |   2 -
 .../master/cleaner/TestSnapshotFromMaster.java     |   8 +-
 .../hbase/replication/TestReplicationBase.java     |   6 +-
 .../hadoop/hbase/thrift2/ThriftUtilities.java      |  18 +-
 .../hadoop/hbase/thrift2/client/ThriftAdmin.java   | 112 +----
 .../hadoop/hbase/thrift2/TestThriftConnection.java |  21 +
 pom.xml                                            |  24 +-
 src/main/asciidoc/_chapters/architecture.adoc      |   2 +
 src/main/asciidoc/_chapters/getting_started.adoc   |  21 +-
 src/main/asciidoc/_chapters/hbase-default.adoc     |  11 +-
 src/main/asciidoc/_chapters/upgrading.adoc         |   2 +-
 45 files changed, 1591 insertions(+), 1313 deletions(-)
 create mode 100644 hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockIOUtils.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java
 rename hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/{FileMmapEngine.java => FileMmapIOEngine.java} (66%)
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java
 copy hbase-server/src/test/java/org/apache/hadoop/hbase/client/{TestSnapshotWithAcl.java => SnapshotWithAclTestBase.java} (74%)
 copy hbase-server/src/test/java/org/apache/hadoop/hbase/client/{TestAsyncTableScanAll.java => TestSnapshotWithAclAsyncAdmin.java} (54%)
 rename hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/{TestHFileBlockPositionalRead.java => TestBlockIOUtils.java} (54%)
 rename hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/{TestFileMmapEngine.java => TestExclusiveMemoryMmapEngine.java} (90%)


[hbase] 21/24: HBASE-21736 Remove the server from online servers before scheduling SCP for it in hbck

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 07f9bef04c7f901bfdb482fb0414fca90a3962e5
Author: zhangduo <zh...@apache.org>
AuthorDate: Sat Mar 9 08:51:35 2019 +0800

    HBASE-21736 Remove the server from online servers before scheduling SCP for it in hbck
    
    Signed-off-by: Peter Somogyi <ps...@apache.org>
---
 .../hadoop/hbase/master/MasterRpcServices.java     |  3 +-
 .../org/apache/hadoop/hbase/client/TestHbck.java   | 84 +++++++++++-----------
 2 files changed, 44 insertions(+), 43 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index e5fc0b8..063a353 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -2477,9 +2477,10 @@ public class MasterRpcServices extends RSRpcServices
       for (HBaseProtos.ServerName serverName : serverNames) {
         ServerName server = ProtobufUtil.toServerName(serverName);
         if (shouldSubmitSCP(server)) {
+          master.getServerManager().moveFromOnlineToDeadServers(server);
           ProcedureExecutor<MasterProcedureEnv> procExec = this.master.getMasterProcedureExecutor();
           pids.add(procExec.submitProcedure(new ServerCrashProcedure(procExec.getEnvironment(),
-              server, true, containMetaWals(server))));
+            server, true, containMetaWals(server))));
         } else {
           pids.add(-1L);
         }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java
index 2951600..8318757 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHbck.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,12 +15,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.client;
 
-import static junit.framework.TestCase.assertTrue;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
 import java.util.stream.Collectors;
@@ -39,8 +39,8 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Threads;
 import org.junit.AfterClass;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Rule;
@@ -59,16 +59,14 @@ import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 
 /**
- * Class to test HBaseHbck.
- * Spins up the minicluster once at test start and then takes it down afterward.
- * Add any testing of HBaseHbck functionality here.
+ * Class to test HBaseHbck. Spins up the minicluster once at test start and then takes it down
+ * afterward. Add any testing of HBaseHbck functionality here.
  */
 @RunWith(Parameterized.class)
-@Category({LargeTests.class, ClientTests.class})
+@Category({ LargeTests.class, ClientTests.class })
 public class TestHbck {
   @ClassRule
-  public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestHbck.class);
+  public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHbck.class);
 
   private static final Logger LOG = LoggerFactory.getLogger(TestHbck.class);
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
@@ -112,15 +110,20 @@ public class TestHbck {
     TEST_UTIL.shutdownMiniCluster();
   }
 
+  @Before
+  public void setUp() throws IOException {
+    TEST_UTIL.ensureSomeRegionServersAvailable(3);
+  }
+
   public static class SuspendProcedure extends
       ProcedureTestingUtility.NoopProcedure<MasterProcedureEnv> implements TableProcedureInterface {
     public SuspendProcedure() {
       super();
     }
 
+    @SuppressWarnings({ "rawtypes", "unchecked" })
     @Override
-    protected Procedure[] execute(final MasterProcedureEnv env)
-        throws ProcedureSuspendedException {
+    protected Procedure[] execute(final MasterProcedureEnv env) throws ProcedureSuspendedException {
       // Always suspend the procedure
       throw new ProcedureSuspendedException();
     }
@@ -143,8 +146,8 @@ public class TestHbck {
     long procId = procExec.submitProcedure(proc);
     Thread.sleep(500);
 
-    //bypass the procedure
-    List<Long> pids = Arrays.<Long>asList(procId);
+    // bypass the procedure
+    List<Long> pids = Arrays.<Long> asList(procId);
     List<Boolean> results = getHbck().bypassProcedure(pids, 30000, false, false);
     assertTrue("Failed to by pass procedure!", results.get(0));
     TEST_UTIL.waitFor(5000, () -> proc.isSuccess() && proc.isBypass());
@@ -159,9 +162,9 @@ public class TestHbck {
     // Method {@link Hbck#setTableStateInMeta()} returns previous state, which in this case
     // will be DISABLED
     TableState prevState =
-        hbck.setTableStateInMeta(new TableState(TABLE_NAME, TableState.State.ENABLED));
+      hbck.setTableStateInMeta(new TableState(TABLE_NAME, TableState.State.ENABLED));
     assertTrue("Incorrect previous state! expeced=DISABLED, found=" + prevState.getState(),
-        prevState.isDisabled());
+      prevState.isDisabled());
   }
 
   @Test
@@ -169,33 +172,33 @@ public class TestHbck {
     Hbck hbck = getHbck();
     try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
       List<RegionInfo> regions = admin.getRegions(TABLE_NAME);
-      for (RegionInfo ri: regions) {
-        RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().
-            getRegionStates().getRegionState(ri.getEncodedName());
+      for (RegionInfo ri : regions) {
+        RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
+          .getRegionStates().getRegionState(ri.getEncodedName());
         LOG.info("RS: {}", rs.toString());
       }
-      List<Long> pids = hbck.unassigns(regions.stream().map(r -> r.getEncodedName()).
-          collect(Collectors.toList()));
+      List<Long> pids =
+        hbck.unassigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList()));
       waitOnPids(pids);
-      for (RegionInfo ri: regions) {
-        RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().
-            getRegionStates().getRegionState(ri.getEncodedName());
+      for (RegionInfo ri : regions) {
+        RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
+          .getRegionStates().getRegionState(ri.getEncodedName());
         LOG.info("RS: {}", rs.toString());
         assertTrue(rs.toString(), rs.isClosed());
       }
-      pids = hbck.assigns(regions.stream().map(r -> r.getEncodedName()).
-          collect(Collectors.toList()));
+      pids =
+        hbck.assigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList()));
       waitOnPids(pids);
-      for (RegionInfo ri: regions) {
-        RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().
-            getRegionStates().getRegionState(ri.getEncodedName());
+      for (RegionInfo ri : regions) {
+        RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
+          .getRegionStates().getRegionState(ri.getEncodedName());
         LOG.info("RS: {}", rs.toString());
         assertTrue(rs.toString(), rs.isOpened());
       }
       // What happens if crappy region list passed?
-      pids = hbck.assigns(Arrays.stream(new String [] {"a", "some rubbish name"}).
-          collect(Collectors.toList()));
-      for (long pid: pids) {
+      pids = hbck.assigns(
+        Arrays.stream(new String[] { "a", "some rubbish name" }).collect(Collectors.toList()));
+      for (long pid : pids) {
         assertEquals(org.apache.hadoop.hbase.procedure2.Procedure.NO_PROC_ID, pid);
       }
     }
@@ -209,21 +212,18 @@ public class TestHbck {
     ServerName serverName = testRs.getServerName();
     Hbck hbck = getHbck();
     List<Long> pids =
-        hbck.scheduleServerCrashProcedure(Arrays.asList(ProtobufUtil.toServerName(serverName)));
+      hbck.scheduleServerCrashProcedure(Arrays.asList(ProtobufUtil.toServerName(serverName)));
     assertTrue(pids.get(0) > 0);
     LOG.info("pid is {}", pids.get(0));
 
-    pids = hbck.scheduleServerCrashProcedure(Arrays.asList(ProtobufUtil.toServerName(serverName)));
-    assertTrue(pids.get(0) == -1);
-    LOG.info("pid is {}", pids.get(0));
+    List<Long> newPids =
+      hbck.scheduleServerCrashProcedure(Arrays.asList(ProtobufUtil.toServerName(serverName)));
+    assertTrue(newPids.get(0) < 0);
+    LOG.info("pid is {}", newPids.get(0));
+    waitOnPids(pids);
   }
 
   private void waitOnPids(List<Long> pids) {
-    for (Long pid: pids) {
-      while (!TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor().
-          isFinished(pid)) {
-        Threads.sleep(100);
-      }
-    }
+    TEST_UTIL.waitFor(60000, () -> pids.stream().allMatch(procExec::isFinished));
   }
 }
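
[Editor's note] For readability, a condensed sketch of the behaviour the patch above establishes: hbck's scheduleServerCrashProcedure now moves the server off the online list before submitting the SCP, so a second request for the same server is rejected (-1) instead of queueing a duplicate procedure. Identifiers follow the diff; getHbck(), testRs and TEST_UTIL are the existing TestHbck fixtures, not new API.

    // Sketch, mirroring testScheduleSCP above (not a standalone program).
    Hbck hbck = getHbck();                       // test helper from TestHbck
    ServerName serverName = testRs.getServerName();
    List<Long> pids =
        hbck.scheduleServerCrashProcedure(Arrays.asList(ProtobufUtil.toServerName(serverName)));
    assertTrue(pids.get(0) > 0);                 // first call queues a ServerCrashProcedure
    // HBASE-21736: the server was moved from online to dead before the SCP was submitted,
    // so shouldSubmitSCP() rejects a repeat request for the same server.
    List<Long> again =
        hbck.scheduleServerCrashProcedure(Arrays.asList(ProtobufUtil.toServerName(serverName)));
    assertTrue(again.get(0) < 0);                // duplicate request returns -1 (NO_PROC_ID)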


[hbase] 23/24: HBASE-22011 ThriftUtilities.getFromThrift should set filter when not set columns

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 6cb2dbe581da8e9b0d0fd89245b89805e20f4954
Author: Bing Xiao <bu...@gmail.com>
AuthorDate: Mon Mar 11 15:16:15 2019 +0800

    HBASE-22011 ThriftUtilities.getFromThrift should set filter when not set columns
---
 .../hadoop/hbase/thrift2/ThriftUtilities.java       | 18 ++++++++----------
 .../hadoop/hbase/thrift2/TestThriftConnection.java  | 21 +++++++++++++++++++++
 2 files changed, 29 insertions(+), 10 deletions(-)

diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
index 204d20d..1fc85e5 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
@@ -180,18 +180,16 @@ public final class ThriftUtilities {
       out.setCheckExistenceOnly(in.isExistence_only());
     }
 
-
-    if (!in.isSetColumns()) {
-      return out;
-    }
-
-    for (TColumn column : in.getColumns()) {
-      if (column.isSetQualifier()) {
-        out.addColumn(column.getFamily(), column.getQualifier());
-      } else {
-        out.addFamily(column.getFamily());
+    if (in.isSetColumns()) {
+      for (TColumn column : in.getColumns()) {
+        if (column.isSetQualifier()) {
+          out.addColumn(column.getFamily(), column.getQualifier());
+        } else {
+          out.addFamily(column.getFamily());
+        }
       }
     }
+
     if (in.isSetFilterBytes()) {
       out.setFilter(filterFromThrift(in.getFilterBytes()));
     }
diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
index 2c9bf69..a11f2e8 100644
--- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
+++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
 import org.apache.hadoop.hbase.filter.ColumnValueFilter;
 import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.filter.PrefixFilter;
@@ -316,6 +317,26 @@ public class TestThriftConnection {
   }
 
   @Test
+  public void testHBASE22011()throws Exception{
+    testHBASE22011(thriftConnection, "testHBASE22011Table");
+    testHBASE22011(thriftHttpConnection, "testHBASE22011HttpTable");
+  }
+
+  public void testHBASE22011(Connection connection, String tableName) throws IOException {
+    createTable(thriftAdmin, tableName);
+    try (Table table = connection.getTable(TableName.valueOf(tableName))){
+      Get get = new Get(ROW_2);
+      Result result = table.get(get);
+      assertEquals(2, result.listCells().size());
+
+      ColumnCountGetFilter filter = new ColumnCountGetFilter(1);
+      get.setFilter(filter);
+      result = table.get(get);
+      assertEquals(1, result.listCells().size());
+    }
+  }
+
+  @Test
   public void testMultiGet() throws Exception {
     testMultiGet(thriftConnection, "testMultiGetTable");
     testMultiGet(thriftHttpConnection, "testMultiGetHttpTable");
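
[Editor's note] In prose: before this patch, getFromThrift returned early when the TGet carried no columns, so a filter set on the request was silently dropped on the Thrift server side. The new regression test above exercises exactly that case. A condensed sketch of the client-side expectation, using identifiers from the test (ROW_2, table and ColumnCountGetFilter are the existing TestThriftConnection fixtures):

    // Sketch of the behaviour verified by testHBASE22011 above.
    Get get = new Get(ROW_2);                       // no columns added on purpose
    Result result = table.get(get);
    assertEquals(2, result.listCells().size());     // both cells of the row come back

    get.setFilter(new ColumnCountGetFilter(1));     // filter only, still no columns
    result = table.get(get);
    assertEquals(1, result.listCells().size());     // the filter is now honoured over Thrift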


[hbase] 01/24: HBASE-21949 Fix flaky test TestHBaseTestingUtility.testMiniZooKeeperWithMultipleClientPorts

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 6e61472f2c6188dd27dbb0f023421da2ed99388d
Author: maoling <ma...@sina.com>
AuthorDate: Tue Mar 5 17:09:54 2019 +0800

    HBASE-21949 Fix flaky test TestHBaseTestingUtility.testMiniZooKeeperWithMultipleClientPorts
    
    Signed-off-by: Guanghao Zhang <zg...@apache.org>
---
 .../org/apache/hadoop/hbase/TestHBaseTestingUtility.java | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtility.java
index 0ec97ef..76a2370 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtility.java
@@ -275,7 +275,9 @@ public class TestHBaseTestingUtility {
       List<Integer> clientPortListInCluster = cluster1.getClientPortList();
 
       for (i = 0; i < clientPortListInCluster.size(); i++) {
-        assertEquals(clientPortListInCluster.get(i).intValue(), clientPortList1[i]);
+        // cannot assert the specific port due to the port conflict in which situation
+        // it always chooses a bigger port by +1. The below is the same.
+        assertTrue(clientPortListInCluster.get(i).intValue() >= clientPortList1[i]);
       }
     } finally {
       hbt.shutdownMiniZKCluster();
@@ -292,11 +294,11 @@ public class TestHBaseTestingUtility {
 
       for (i = 0, j = 0; i < clientPortListInCluster.size(); i++) {
         if (i < clientPortList2.length) {
-          assertEquals(clientPortListInCluster.get(i).intValue(), clientPortList2[i]);
+          assertTrue(clientPortListInCluster.get(i).intValue() >= clientPortList2[i]);
         } else {
           // servers with no specified client port will use defaultClientPort or some other ports
           // based on defaultClientPort
-          assertEquals(clientPortListInCluster.get(i).intValue(), defaultClientPort + j);
+          assertTrue(clientPortListInCluster.get(i).intValue() >= defaultClientPort + j);
           j++;
         }
       }
@@ -317,9 +319,9 @@ public class TestHBaseTestingUtility {
         // Servers will only use valid client ports; if ports are not specified or invalid,
         // the default port or a port based on default port will be used.
         if (i < clientPortList3.length && clientPortList3[i] > 0) {
-          assertEquals(clientPortListInCluster.get(i).intValue(), clientPortList3[i]);
+          assertTrue(clientPortListInCluster.get(i).intValue() >= clientPortList3[i]);
         } else {
-          assertEquals(clientPortListInCluster.get(i).intValue(), defaultClientPort + j);
+          assertTrue(clientPortListInCluster.get(i).intValue() >= defaultClientPort + j);
           j++;
         }
       }
@@ -343,9 +345,9 @@ public class TestHBaseTestingUtility {
         // Servers will only use valid client ports; if ports are not specified or invalid,
         // the default port or a port based on default port will be used.
         if (i < clientPortList4.length && clientPortList4[i] > 0) {
-          assertEquals(clientPortListInCluster.get(i).intValue(), clientPortList4[i]);
+          assertTrue(clientPortListInCluster.get(i).intValue() >= clientPortList4[i]);
         } else {
-          assertEquals(clientPortListInCluster.get(i).intValue(), defaultClientPort + j);
+          assertTrue(clientPortListInCluster.get(i).intValue() >= defaultClientPort + j);
           j +=2;
         }
       }
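
[Editor's note] The reasoning behind the relaxed assertions: when a requested ZooKeeper client port is already taken, the mini cluster retries with a higher port, so the test can only guarantee a lower bound on the port that is actually bound. A condensed sketch of the pattern the patch applies throughout the test (clientPortListInCluster and clientPortList1 are the existing test variables):

    // Sketch: assert a lower bound instead of exact equality.
    for (int i = 0; i < clientPortListInCluster.size(); i++) {
      // On a port conflict the mini ZK cluster bumps the port (+1, possibly repeatedly),
      // so the bound port is >= the requested one, not necessarily equal to it.
      assertTrue(clientPortListInCluster.get(i).intValue() >= clientPortList1[i]);
    }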


[hbase] 13/24: HBASE-21999 ADDENDUM unknown revisions are okay; make sure we indicate them.

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 3bc1e79f71a42c41307199d21fb28e3e333fa42e
Author: Sean Busbey <bu...@apache.org>
AuthorDate: Thu Mar 7 10:33:32 2019 -0600

    HBASE-21999 ADDENDUM unknown revisions are okay; make sure we indicate them.
---
 hbase-common/pom.xml            |  4 ++--
 hbase-common/src/saveVersion.sh | 14 ++++++--------
 2 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml
index 670dab2..3377016 100644
--- a/hbase-common/pom.xml
+++ b/hbase-common/pom.xml
@@ -86,13 +86,13 @@
           </execution>
                 <!-- Generate web app sources -->
                 <execution>
-                    <id>generate</id>
+                    <id>generate-Version-information</id>
                     <phase>generate-sources</phase>
                     <configuration>
                         <target>
                             <property name="generated.sources" location="${project.build.directory}/generated-sources"/>
 
-                            <exec executable="bash">
+                            <exec executable="bash" failonerror="true">
                                 <arg line="${basedir}/src/saveVersion.sh ${project.version} ${generated.sources}/java"/>
                             </exec>
                         </target>
diff --git a/hbase-common/src/saveVersion.sh b/hbase-common/src/saveVersion.sh
index 97fe6b4..507bbb0 100644
--- a/hbase-common/src/saveVersion.sh
+++ b/hbase-common/src/saveVersion.sh
@@ -37,20 +37,18 @@ fi
 date=`date`
 cwd=`pwd`
 if [ -d .svn ]; then
-  revision=`svn info | sed -n -e 's/Last Changed Rev: \(.*\)/\1/p'`
-  url=`svn info | sed -n -e 's/^URL: \(.*\)/\1/p'`
+  revision=`(svn info | sed -n -e 's/Last Changed Rev: \(.*\)/\1/p') || true`
+  url=`(svn info | sed -n -e 's/^URL: \(.*\)/\1/p') || true`
 elif [ -d .git ]; then
-  revision=`git log -1 --no-show-signature --pretty=format:"%H"`
+  revision=`git log -1 --no-show-signature --pretty=format:"%H" || true`
   hostname=`hostname`
   url="git://${hostname}${cwd}"
-else
+fi
+if [ -z "${revision}" ]; then
+  echo "[WARN] revision info is empty! either we're not in VCS or VCS commands failed." >&2
   revision="Unknown"
   url="file://$cwd"
 fi
-if [ -z $revision ]; then
-  echo "$revision is empty!"
-  exit 1
-fi
 if ! [  -x "$(command -v md5sum)" ]; then
   if ! [ -x "$(command -v md5)" ]; then
     srcChecksum="Unknown"


[hbase] 12/24: HBASE-20918 Re-enable TestRpcHandlerException

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 460b508a1ba0f46da73b3d86cd6dfae150a2ec94
Author: Jack Bearden <ja...@jackbearden.com>
AuthorDate: Sat Jul 21 20:48:13 2018 -0700

    HBASE-20918 Re-enable TestRpcHandlerException
    
    Signed-off-by: Sean Busbey <bu...@apache.org>
---
 .../test/java/org/apache/hadoop/hbase/ipc/TestRpcHandlerException.java  | 2 --
 1 file changed, 2 deletions(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcHandlerException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcHandlerException.java
index 88f2434..e173e23 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcHandlerException.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcHandlerException.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
 import org.apache.hadoop.hbase.testclassification.RPCTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.ClassRule;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
@@ -89,7 +88,6 @@ public class TestRpcHandlerException {
    * This is a unit test to make sure to abort region server when the number of Rpc handler thread
    * caught errors exceeds the threshold. Client will hang when RS aborts.
    */
-  @Ignore
   @Test
   public void testRpcScheduler() throws IOException, InterruptedException {
     PriorityFunction qosFunction = mock(PriorityFunction.class);


[hbase] 03/24: HBASE-20754 [documentation] quickstart guide should instruct folks to set JAVA_HOME to a JDK installation.

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit c989790edbce0edeaad3f98950d68a6d33a99f00
Author: Gokul <gk...@hortonworks.com>
AuthorDate: Thu Feb 28 11:11:55 2019 +0530

    HBASE-20754 [documentation] quickstart guide should instruct folks to set JAVA_HOME to a JDK installation.
    
    Signed-off-by: Sean Busbey <bu...@apache.org>
---
 src/main/asciidoc/_chapters/getting_started.adoc | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/src/main/asciidoc/_chapters/getting_started.adoc b/src/main/asciidoc/_chapters/getting_started.adoc
index 84ebcaa..96bc9d0 100644
--- a/src/main/asciidoc/_chapters/getting_started.adoc
+++ b/src/main/asciidoc/_chapters/getting_started.adoc
@@ -67,18 +67,15 @@ $ tar xzvf hbase-{Version}-bin.tar.gz
 $ cd hbase-{Version}/
 ----
 
-. You are required to set the `JAVA_HOME` environment variable before starting HBase.
-  You can set the variable via your operating system's usual mechanism, but HBase
-  provides a central mechanism, _conf/hbase-env.sh_.
-  Edit this file, uncomment the line starting with `JAVA_HOME`, and set it to the
-  appropriate location for your operating system.
-  The `JAVA_HOME` variable should be set to a directory which contains the executable file _bin/java_.
-  Most modern Linux operating systems provide a mechanism, such as /usr/bin/alternatives on RHEL or CentOS, for transparently switching between versions of executables such as Java.
-  In this case, you can set `JAVA_HOME` to the directory containing the symbolic link to _bin/java_, which is usually _/usr_.
-+
-----
-JAVA_HOME=/usr
-----
+. You must set the `JAVA_HOME` environment variable before starting HBase.
+  To make this easier, HBase lets you set it within the _conf/hbase-env.sh_ file. You must locate where Java is
+  installed on your machine, and one way to find this is by using the _whereis java_ command. Once you have the location,
+  edit the _conf/hbase-env.sh_ file and uncomment the line starting with _#export JAVA_HOME=_, and then set it to your Java installation path.
++
+.Example extract from _hbase-env.sh_ where _JAVA_HOME_ is set
+  # Set environment variables here.
+  # The java implementation to use.
+  export JAVA_HOME=/usr/jdk64/jdk1.8.0_112
 +
 
 . Edit _conf/hbase-site.xml_, which is the main HBase configuration file.


[hbase] 11/24: HBASE-22006 Fix branch-2.1 findbugs warning; causes nightly show as failed.

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 8b73fae06de026a40a996e47fb92bdce7d08ec9f
Author: stack <st...@apache.org>
AuthorDate: Wed Mar 6 15:59:06 2019 -0800

    HBASE-22006 Fix branch-2.1 findbugs warning; causes nightly show as failed.
    
    (cherry picked from commit 18d02ffd5df01433d9e15c7a84fb225adf2a22e0)
---
 .../java/org/apache/hadoop/hbase/regionserver/HRegionServer.java     | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index f983882..e407285 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1613,9 +1613,8 @@ public class HRegionServer extends HasThread implements
           MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT);
       int chunkSize = conf.getInt(MemStoreLAB.CHUNK_SIZE_KEY, MemStoreLAB.CHUNK_SIZE_DEFAULT);
       // init the chunkCreator
-      ChunkCreator chunkCreator =
-          ChunkCreator.initialize(chunkSize, offheap, globalMemStoreSize, poolSizePercentage,
-      initialCountPercentage, this.hMemManager);
+      ChunkCreator.initialize(chunkSize, offheap, globalMemStoreSize, poolSizePercentage,
+        initialCountPercentage, this.hMemManager);
     }
   }
 


[hbase] 14/24: HBASE-22007 Add restoreSnapshot and cloneSnapshot with acl methods in AsyncAdmin

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 9fd263995d83e0380c93cdc484c06099aadcdbbf
Author: zhangduo <zh...@apache.org>
AuthorDate: Thu Mar 7 20:52:21 2019 +0800

    HBASE-22007 Add restoreSnapshot and cloneSnapshot with acl methods in AsyncAdmin
    
    Signed-off-by: Zheng Hu <op...@gmail.com>
---
 .../org/apache/hadoop/hbase/client/AsyncAdmin.java |  32 ++-
 .../hadoop/hbase/client/AsyncHBaseAdmin.java       |  10 +-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java    |  86 ++++----
 ...otWithAcl.java => SnapshotWithAclTestBase.java} | 101 ++++------
 .../hadoop/hbase/client/TestSnapshotWithAcl.java   | 221 ++-------------------
 .../client/TestSnapshotWithAclAsyncAdmin.java      |  58 ++++++
 6 files changed, 194 insertions(+), 314 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 94dc1e0..5952821 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -767,14 +767,42 @@ public interface AsyncAdmin {
    * @param snapshotName name of the snapshot to restore
    * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
    */
-  CompletableFuture<Void> restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot);
+  default CompletableFuture<Void> restoreSnapshot(String snapshotName,
+      boolean takeFailSafeSnapshot) {
+    return restoreSnapshot(snapshotName, takeFailSafeSnapshot, false);
+  }
+
+  /**
+   * Restore the specified snapshot on the original table. (The table must be disabled) If
+   * 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken before
+   * executing the restore operation. In case of restore failure, the failsafe snapshot will be
+   * restored. If the restore completes without problem the failsafe snapshot is deleted. The
+   * failsafe snapshot name is configurable by using the property
+   * "hbase.snapshot.restore.failsafe.name".
+   * @param snapshotName name of the snapshot to restore
+   * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
+   * @param restoreAcl <code>true</code> to restore acl of snapshot
+   */
+  CompletableFuture<Void> restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot,
+      boolean restoreAcl);
+
+  /**
+   * Create a new table by cloning the snapshot content.
+   * @param snapshotName name of the snapshot to be cloned
+   * @param tableName name of the table where the snapshot will be restored
+   */
+  default CompletableFuture<Void> cloneSnapshot(String snapshotName, TableName tableName) {
+    return cloneSnapshot(snapshotName, tableName, false);
+  }
 
   /**
    * Create a new table by cloning the snapshot content.
    * @param snapshotName name of the snapshot to be cloned
    * @param tableName name of the table where the snapshot will be restored
+   * @param restoreAcl <code>true</code> to restore acl of snapshot
    */
-  CompletableFuture<Void> cloneSnapshot(String snapshotName, TableName tableName);
+  CompletableFuture<Void> cloneSnapshot(String snapshotName, TableName tableName,
+      boolean restoreAcl);
 
   /**
    * List completed snapshots.
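
[Editor's note] Taken together, the interface changes above give AsyncAdmin restoreAcl-aware variants plus default overloads that keep the old two-argument signatures source-compatible; the implementation wiring in AsyncHBaseAdmin and RawAsyncHBaseAdmin follows in the diffs below. A condensed usage sketch (admin is an AsyncAdmin obtained elsewhere; the snapshot and table names are placeholders):

    // Sketch of the new surface; the two-argument forms delegate with restoreAcl = false.
    CompletableFuture<Void> clone =
        admin.cloneSnapshot("snap-1", TableName.valueOf("cloned"), true);   // clone and restore ACLs
    CompletableFuture<Void> restore =
        admin.restoreSnapshot("snap-1", true /* failsafe snapshot */, true /* restore ACLs */);
    // Old callers keep compiling: restoreSnapshot(name, failsafe) == restoreSnapshot(name, failsafe, false).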
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
index 78c530e..53eaec8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
@@ -467,13 +467,15 @@ class AsyncHBaseAdmin implements AsyncAdmin {
   }
 
   @Override
-  public CompletableFuture<Void> restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot) {
-    return wrap(rawAdmin.restoreSnapshot(snapshotName, takeFailSafeSnapshot));
+  public CompletableFuture<Void> restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot,
+      boolean restoreAcl) {
+    return wrap(rawAdmin.restoreSnapshot(snapshotName, takeFailSafeSnapshot, restoreAcl));
   }
 
   @Override
-  public CompletableFuture<Void> cloneSnapshot(String snapshotName, TableName tableName) {
-    return wrap(rawAdmin.cloneSnapshot(snapshotName, tableName));
+  public CompletableFuture<Void> cloneSnapshot(String snapshotName, TableName tableName,
+      boolean restoreAcl) {
+    return wrap(rawAdmin.cloneSnapshot(snapshotName, tableName, restoreAcl));
   }
 
   @Override
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index 04ed3c5..1092332 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -1852,8 +1852,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
   }
 
   @Override
-  public CompletableFuture<Void> restoreSnapshot(String snapshotName,
-      boolean takeFailSafeSnapshot) {
+  public CompletableFuture<Void> restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot,
+      boolean restoreAcl) {
     CompletableFuture<Void> future = new CompletableFuture<>();
     addListener(listSnapshots(Pattern.compile(snapshotName)), (snapshotDescriptions, err) -> {
       if (err != null) {
@@ -1881,7 +1881,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
         } else if (!exists) {
           // if table does not exist, then just clone snapshot into new table.
           completeConditionalOnFuture(future,
-            internalRestoreSnapshot(snapshotName, finalTableName));
+            internalRestoreSnapshot(snapshotName, finalTableName, restoreAcl));
         } else {
           addListener(isTableDisabled(finalTableName), (disabled, err4) -> {
             if (err4 != null) {
@@ -1890,7 +1890,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
               future.completeExceptionally(new TableNotDisabledException(finalTableName));
             } else {
               completeConditionalOnFuture(future,
-                restoreSnapshot(snapshotName, finalTableName, takeFailSafeSnapshot));
+                restoreSnapshot(snapshotName, finalTableName, takeFailSafeSnapshot, restoreAcl));
             }
           });
         }
@@ -1900,7 +1900,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
   }
 
   private CompletableFuture<Void> restoreSnapshot(String snapshotName, TableName tableName,
-      boolean takeFailSafeSnapshot) {
+      boolean takeFailSafeSnapshot, boolean restoreAcl) {
     if (takeFailSafeSnapshot) {
       CompletableFuture<Void> future = new CompletableFuture<>();
       // Step.1 Take a snapshot of the current state
@@ -1917,40 +1917,42 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
           future.completeExceptionally(err);
         } else {
           // Step.2 Restore snapshot
-          addListener(internalRestoreSnapshot(snapshotName, tableName), (void2, err2) -> {
-            if (err2 != null) {
-              // Step.3.a Something went wrong during the restore and try to rollback.
-              addListener(internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName),
-                (void3, err3) -> {
+          addListener(internalRestoreSnapshot(snapshotName, tableName, restoreAcl),
+            (void2, err2) -> {
+              if (err2 != null) {
+                // Step.3.a Something went wrong during the restore and try to rollback.
+                addListener(
+                  internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName, restoreAcl),
+                  (void3, err3) -> {
+                    if (err3 != null) {
+                      future.completeExceptionally(err3);
+                    } else {
+                      String msg =
+                        "Restore snapshot=" + snapshotName + " failed. Rollback to snapshot=" +
+                          failSafeSnapshotSnapshotName + " succeeded.";
+                      future.completeExceptionally(new RestoreSnapshotException(msg));
+                    }
+                  });
+              } else {
+                // Step.3.b If the restore is succeeded, delete the pre-restore snapshot.
+                LOG.info("Deleting restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
+                addListener(deleteSnapshot(failSafeSnapshotSnapshotName), (ret3, err3) -> {
                   if (err3 != null) {
+                    LOG.error(
+                      "Unable to remove the failsafe snapshot: " + failSafeSnapshotSnapshotName,
+                      err3);
                     future.completeExceptionally(err3);
                   } else {
-                    String msg =
-                      "Restore snapshot=" + snapshotName + " failed. Rollback to snapshot=" +
-                        failSafeSnapshotSnapshotName + " succeeded.";
-                    future.completeExceptionally(new RestoreSnapshotException(msg));
+                    future.complete(ret3);
                   }
                 });
-            } else {
-              // Step.3.b If the restore is succeeded, delete the pre-restore snapshot.
-              LOG.info("Deleting restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
-              addListener(deleteSnapshot(failSafeSnapshotSnapshotName), (ret3, err3) -> {
-                if (err3 != null) {
-                  LOG.error(
-                    "Unable to remove the failsafe snapshot: " + failSafeSnapshotSnapshotName,
-                    err3);
-                  future.completeExceptionally(err3);
-                } else {
-                  future.complete(ret3);
-                }
-              });
-            }
-          });
+              }
+            });
         }
       });
       return future;
     } else {
-      return internalRestoreSnapshot(snapshotName, tableName);
+      return internalRestoreSnapshot(snapshotName, tableName, restoreAcl);
     }
   }
 
@@ -1966,7 +1968,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
   }
 
   @Override
-  public CompletableFuture<Void> cloneSnapshot(String snapshotName, TableName tableName) {
+  public CompletableFuture<Void> cloneSnapshot(String snapshotName, TableName tableName,
+      boolean restoreAcl) {
     CompletableFuture<Void> future = new CompletableFuture<>();
     addListener(tableExists(tableName), (exists, err) -> {
       if (err != null) {
@@ -1974,27 +1977,28 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
       } else if (exists) {
         future.completeExceptionally(new TableExistsException(tableName));
       } else {
-        completeConditionalOnFuture(future, internalRestoreSnapshot(snapshotName, tableName));
+        completeConditionalOnFuture(future,
+          internalRestoreSnapshot(snapshotName, tableName, restoreAcl));
       }
     });
     return future;
   }
 
-  private CompletableFuture<Void> internalRestoreSnapshot(String snapshotName, TableName tableName) {
+  private CompletableFuture<Void> internalRestoreSnapshot(String snapshotName, TableName tableName,
+      boolean restoreAcl) {
     SnapshotProtos.SnapshotDescription snapshot = SnapshotProtos.SnapshotDescription.newBuilder()
-        .setName(snapshotName).setTable(tableName.getNameAsString()).build();
+      .setName(snapshotName).setTable(tableName.getNameAsString()).build();
     try {
       ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
     } catch (IllegalArgumentException e) {
       return failedFuture(e);
     }
-    return waitProcedureResult(this
-        .<Long> newMasterCaller()
-        .action(
-          (controller, stub) -> this.<RestoreSnapshotRequest, RestoreSnapshotResponse, Long> call(
-            controller, stub, RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot)
-                .setNonceGroup(ng.getNonceGroup()).setNonce(ng.newNonce()).build(), (s, c, req,
-                done) -> s.restoreSnapshot(c, req, done), (resp) -> resp.getProcId())).call());
+    return waitProcedureResult(this.<Long> newMasterCaller().action((controller, stub) -> this
+      .<RestoreSnapshotRequest, RestoreSnapshotResponse, Long> call(controller, stub,
+        RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot).setNonceGroup(ng.getNonceGroup())
+          .setNonce(ng.newNonce()).setRestoreACL(restoreAcl).build(),
+        (s, c, req, done) -> s.restoreSnapshot(c, req, done), (resp) -> resp.getProcId()))
+      .call());
   }
 
   @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotWithAclTestBase.java
similarity index 74%
copy from hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java
copy to hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotWithAclTestBase.java
index a9d4230..9359fcc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/SnapshotWithAclTestBase.java
@@ -20,10 +20,7 @@ package org.apache.hadoop.hbase.client;
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Coprocessor;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@@ -33,34 +30,24 @@ import org.apache.hadoop.hbase.security.access.AccessControlLists;
 import org.apache.hadoop.hbase.security.access.AccessController;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.SecureTestUtil;
-import org.apache.hadoop.hbase.testclassification.ClientTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.ClassRule;
 import org.junit.Test;
-import org.junit.experimental.categories.Category;
 
-@Category({ MediumTests.class, ClientTests.class })
-public class TestSnapshotWithAcl extends SecureTestUtil {
+public abstract class SnapshotWithAclTestBase extends SecureTestUtil {
 
-  @ClassRule
-  public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestSnapshotWithAcl.class);
-
-  public TableName TEST_TABLE = TableName.valueOf(TEST_UTIL.getRandomUUID().toString());
+  private TableName TEST_TABLE = TableName.valueOf(TEST_UTIL.getRandomUUID().toString());
 
   private static final int ROW_COUNT = 30000;
 
   private static byte[] TEST_FAMILY = Bytes.toBytes("f1");
   private static byte[] TEST_QUALIFIER = Bytes.toBytes("cq");
   private static byte[] TEST_ROW = Bytes.toBytes(0);
-  private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private static Configuration conf;
-  private static HBaseAdmin admin = null;
+
+  protected static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 
   // user is table owner. will have all permissions on table
   private static User USER_OWNER;
@@ -83,10 +70,9 @@ public class TestSnapshotWithAcl extends SecureTestUtil {
     public Object run() throws Exception {
       Get g = new Get(TEST_ROW);
       g.addFamily(TEST_FAMILY);
-      try (Connection conn = ConnectionFactory.createConnection(conf)) {
-        try (Table t = conn.getTable(tableName)) {
-          t.get(g);
-        }
+      try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
+        Table t = conn.getTable(tableName)) {
+        t.get(g);
       }
       return null;
     }
@@ -103,19 +89,17 @@ public class TestSnapshotWithAcl extends SecureTestUtil {
     public Object run() throws Exception {
       Put p = new Put(TEST_ROW);
       p.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(0));
-      try (Connection conn = ConnectionFactory.createConnection(conf)) {
-        try (Table t = conn.getTable(tableName)) {
-          t.put(p);
-        }
+      try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
+        Table t = conn.getTable(tableName)) {
+        t.put(p);
       }
       return null;
     }
   }
 
-
   @BeforeClass
   public static void setupBeforeClass() throws Exception {
-    conf = TEST_UTIL.getConfiguration();
+    Configuration conf = TEST_UTIL.getConfiguration();
     // Enable security
     enableSecurity(conf);
     conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName());
@@ -126,7 +110,7 @@ public class TestSnapshotWithAcl extends SecureTestUtil {
     TEST_UTIL.startMiniCluster();
     TEST_UTIL.waitUntilAllRegionsAssigned(AccessControlLists.ACL_TABLE_NAME);
     MasterCoprocessorHost cpHost =
-        TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
+      TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
     cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf);
 
     USER_OWNER = User.createUserForTesting(conf, "owner", new String[0]);
@@ -137,24 +121,21 @@ public class TestSnapshotWithAcl extends SecureTestUtil {
 
   @Before
   public void setUp() throws Exception {
-    admin = TEST_UTIL.getHBaseAdmin();
-    HTableDescriptor htd = new HTableDescriptor(TEST_TABLE);
-    HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY);
-    hcd.setMaxVersions(100);
-    htd.addFamily(hcd);
-    htd.setOwner(USER_OWNER);
-    admin.createTable(htd, new byte[][] { Bytes.toBytes("s") });
+    TEST_UTIL.createTable(TableDescriptorBuilder.newBuilder(TEST_TABLE)
+      .setColumnFamily(
+        ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(100).build())
+      .setOwner(USER_OWNER).build(), new byte[][] { Bytes.toBytes("s") });
     TEST_UTIL.waitTableEnabled(TEST_TABLE);
 
     grantOnTable(TEST_UTIL, USER_RW.getShortName(), TEST_TABLE, TEST_FAMILY, null,
-            Permission.Action.READ, Permission.Action.WRITE);
+      Permission.Action.READ, Permission.Action.WRITE);
 
     grantOnTable(TEST_UTIL, USER_RO.getShortName(), TEST_TABLE, TEST_FAMILY, null,
-            Permission.Action.READ);
+      Permission.Action.READ);
   }
 
   private void loadData() throws IOException {
-    try (Connection conn = ConnectionFactory.createConnection(conf)) {
+    try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) {
       try (Table t = conn.getTable(TEST_TABLE)) {
         for (int i = 0; i < ROW_COUNT; i++) {
           Put put = new Put(Bytes.toBytes(i));
@@ -171,21 +152,25 @@ public class TestSnapshotWithAcl extends SecureTestUtil {
   }
 
   private void verifyRows(TableName tableName) throws IOException {
-    try (Connection conn = ConnectionFactory.createConnection(conf)) {
-      try (Table t = conn.getTable(tableName)) {
-        try (ResultScanner scanner = t.getScanner(new Scan())) {
-          Result result;
-          int rowCount = 0;
-          while ((result = scanner.next()) != null) {
-            byte[] value = result.getValue(TEST_FAMILY, TEST_QUALIFIER);
-            Assert.assertArrayEquals(value, Bytes.toBytes(rowCount++));
-          }
-          Assert.assertEquals(ROW_COUNT, rowCount);
-        }
+    try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
+      Table t = conn.getTable(tableName); ResultScanner scanner = t.getScanner(new Scan())) {
+      Result result;
+      int rowCount = 0;
+      while ((result = scanner.next()) != null) {
+        byte[] value = result.getValue(TEST_FAMILY, TEST_QUALIFIER);
+        Assert.assertArrayEquals(value, Bytes.toBytes(rowCount++));
       }
+      Assert.assertEquals(ROW_COUNT, rowCount);
     }
   }
 
+  protected abstract void snapshot(String snapshotName, TableName tableName) throws Exception;
+
+  protected abstract void cloneSnapshot(String snapshotName, TableName tableName,
+      boolean restoreAcl) throws Exception;
+
+  protected abstract void restoreSnapshot(String snapshotName, boolean restoreAcl) throws Exception;
+
   @Test
   public void testRestoreSnapshot() throws Exception {
     verifyAllowed(new AccessReadAction(TEST_TABLE), USER_OWNER, USER_RO, USER_RW);
@@ -197,11 +182,11 @@ public class TestSnapshotWithAcl extends SecureTestUtil {
     verifyRows(TEST_TABLE);
 
     String snapshotName1 = TEST_UTIL.getRandomUUID().toString();
-    admin.snapshot(snapshotName1, TEST_TABLE);
+    snapshot(snapshotName1, TEST_TABLE);
 
     // clone snapshot with restoreAcl true.
     TableName tableName1 = TableName.valueOf(TEST_UTIL.getRandomUUID().toString());
-    admin.cloneSnapshot(snapshotName1, tableName1, true);
+    cloneSnapshot(snapshotName1, tableName1, true);
     verifyRows(tableName1);
     verifyAllowed(new AccessReadAction(tableName1), USER_OWNER, USER_RO, USER_RW);
     verifyDenied(new AccessReadAction(tableName1), USER_NONE);
@@ -210,7 +195,7 @@ public class TestSnapshotWithAcl extends SecureTestUtil {
 
     // clone snapshot with restoreAcl false.
     TableName tableName2 = TableName.valueOf(TEST_UTIL.getRandomUUID().toString());
-    admin.cloneSnapshot(snapshotName1, tableName2, false);
+    cloneSnapshot(snapshotName1, tableName2, false);
     verifyRows(tableName2);
     verifyAllowed(new AccessReadAction(tableName2), USER_OWNER);
     verifyDenied(new AccessReadAction(tableName2), USER_NONE, USER_RO, USER_RW);
@@ -226,18 +211,18 @@ public class TestSnapshotWithAcl extends SecureTestUtil {
     verifyDenied(new AccessWriteAction(TEST_TABLE), USER_RO, USER_NONE);
 
     // restore snapshot with restoreAcl false.
-    admin.disableTable(TEST_TABLE);
-    admin.restoreSnapshot(snapshotName1, false, false);
-    admin.enableTable(TEST_TABLE);
+    TEST_UTIL.getAdmin().disableTable(TEST_TABLE);
+    restoreSnapshot(snapshotName1, false);
+    TEST_UTIL.getAdmin().enableTable(TEST_TABLE);
     verifyAllowed(new AccessReadAction(TEST_TABLE), USER_OWNER, USER_RW);
     verifyDenied(new AccessReadAction(TEST_TABLE), USER_RO, USER_NONE);
     verifyAllowed(new AccessWriteAction(TEST_TABLE), USER_OWNER, USER_RW);
     verifyDenied(new AccessWriteAction(TEST_TABLE), USER_RO, USER_NONE);
 
     // restore snapshot with restoreAcl true.
-    admin.disableTable(TEST_TABLE);
-    admin.restoreSnapshot(snapshotName1, false, true);
-    admin.enableTable(TEST_TABLE);
+    TEST_UTIL.getAdmin().disableTable(TEST_TABLE);
+    restoreSnapshot(snapshotName1, true);
+    TEST_UTIL.getAdmin().enableTable(TEST_TABLE);
     verifyAllowed(new AccessReadAction(TEST_TABLE), USER_OWNER, USER_RO, USER_RW);
     verifyDenied(new AccessReadAction(TEST_TABLE), USER_NONE);
     verifyAllowed(new AccessWriteAction(TEST_TABLE), USER_OWNER, USER_RW);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java
index a9d4230..614c1fe 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java
@@ -17,230 +17,33 @@
  */
 package org.apache.hadoop.hbase.client;
 
-import java.io.IOException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.security.access.AccessControlConstants;
-import org.apache.hadoop.hbase.security.access.AccessControlLists;
-import org.apache.hadoop.hbase.security.access.AccessController;
-import org.apache.hadoop.hbase.security.access.Permission;
-import org.apache.hadoop.hbase.security.access.SecureTestUtil;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.ClassRule;
-import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 @Category({ MediumTests.class, ClientTests.class })
-public class TestSnapshotWithAcl extends SecureTestUtil {
+public class TestSnapshotWithAcl extends SnapshotWithAclTestBase {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestSnapshotWithAcl.class);
+    HBaseClassTestRule.forClass(TestSnapshotWithAcl.class);
 
-  public TableName TEST_TABLE = TableName.valueOf(TEST_UTIL.getRandomUUID().toString());
-
-  private static final int ROW_COUNT = 30000;
-
-  private static byte[] TEST_FAMILY = Bytes.toBytes("f1");
-  private static byte[] TEST_QUALIFIER = Bytes.toBytes("cq");
-  private static byte[] TEST_ROW = Bytes.toBytes(0);
-  private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private static Configuration conf;
-  private static HBaseAdmin admin = null;
-
-  // user is table owner. will have all permissions on table
-  private static User USER_OWNER;
-  // user with rw permissions on column family.
-  private static User USER_RW;
-  // user with read-only permissions
-  private static User USER_RO;
-  // user with none permissions
-  private static User USER_NONE;
-
-  static class AccessReadAction implements AccessTestAction {
-
-    private TableName tableName;
-
-    public AccessReadAction(TableName tableName) {
-      this.tableName = tableName;
-    }
-
-    @Override
-    public Object run() throws Exception {
-      Get g = new Get(TEST_ROW);
-      g.addFamily(TEST_FAMILY);
-      try (Connection conn = ConnectionFactory.createConnection(conf)) {
-        try (Table t = conn.getTable(tableName)) {
-          t.get(g);
-        }
-      }
-      return null;
-    }
-  }
-
-  static class AccessWriteAction implements AccessTestAction {
-    private TableName tableName;
-
-    public AccessWriteAction(TableName tableName) {
-      this.tableName = tableName;
-    }
-
-    @Override
-    public Object run() throws Exception {
-      Put p = new Put(TEST_ROW);
-      p.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(0));
-      try (Connection conn = ConnectionFactory.createConnection(conf)) {
-        try (Table t = conn.getTable(tableName)) {
-          t.put(p);
-        }
-      }
-      return null;
-    }
-  }
-
-
-  @BeforeClass
-  public static void setupBeforeClass() throws Exception {
-    conf = TEST_UTIL.getConfiguration();
-    // Enable security
-    enableSecurity(conf);
-    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName());
-    // Verify enableSecurity sets up what we require
-    verifyConfiguration(conf);
-    // Enable EXEC permission checking
-    conf.setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true);
-    TEST_UTIL.startMiniCluster();
-    TEST_UTIL.waitUntilAllRegionsAssigned(AccessControlLists.ACL_TABLE_NAME);
-    MasterCoprocessorHost cpHost =
-        TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
-    cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf);
-
-    USER_OWNER = User.createUserForTesting(conf, "owner", new String[0]);
-    USER_RW = User.createUserForTesting(conf, "rwuser", new String[0]);
-    USER_RO = User.createUserForTesting(conf, "rouser", new String[0]);
-    USER_NONE = User.createUserForTesting(conf, "usernone", new String[0]);
+  @Override
+  protected void snapshot(String snapshotName, TableName tableName) throws Exception {
+    TEST_UTIL.getAdmin().snapshot(snapshotName, tableName);
   }
 
-  @Before
-  public void setUp() throws Exception {
-    admin = TEST_UTIL.getHBaseAdmin();
-    HTableDescriptor htd = new HTableDescriptor(TEST_TABLE);
-    HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY);
-    hcd.setMaxVersions(100);
-    htd.addFamily(hcd);
-    htd.setOwner(USER_OWNER);
-    admin.createTable(htd, new byte[][] { Bytes.toBytes("s") });
-    TEST_UTIL.waitTableEnabled(TEST_TABLE);
-
-    grantOnTable(TEST_UTIL, USER_RW.getShortName(), TEST_TABLE, TEST_FAMILY, null,
-            Permission.Action.READ, Permission.Action.WRITE);
-
-    grantOnTable(TEST_UTIL, USER_RO.getShortName(), TEST_TABLE, TEST_FAMILY, null,
-            Permission.Action.READ);
-  }
-
-  private void loadData() throws IOException {
-    try (Connection conn = ConnectionFactory.createConnection(conf)) {
-      try (Table t = conn.getTable(TEST_TABLE)) {
-        for (int i = 0; i < ROW_COUNT; i++) {
-          Put put = new Put(Bytes.toBytes(i));
-          put.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
-          t.put(put);
-        }
-      }
-    }
-  }
-
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
-  private void verifyRows(TableName tableName) throws IOException {
-    try (Connection conn = ConnectionFactory.createConnection(conf)) {
-      try (Table t = conn.getTable(tableName)) {
-        try (ResultScanner scanner = t.getScanner(new Scan())) {
-          Result result;
-          int rowCount = 0;
-          while ((result = scanner.next()) != null) {
-            byte[] value = result.getValue(TEST_FAMILY, TEST_QUALIFIER);
-            Assert.assertArrayEquals(value, Bytes.toBytes(rowCount++));
-          }
-          Assert.assertEquals(ROW_COUNT, rowCount);
-        }
-      }
-    }
+  @Override
+  protected void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl)
+      throws Exception {
+    TEST_UTIL.getAdmin().cloneSnapshot(snapshotName, tableName, restoreAcl);
   }
 
-  @Test
-  public void testRestoreSnapshot() throws Exception {
-    verifyAllowed(new AccessReadAction(TEST_TABLE), USER_OWNER, USER_RO, USER_RW);
-    verifyDenied(new AccessReadAction(TEST_TABLE), USER_NONE);
-    verifyAllowed(new AccessWriteAction(TEST_TABLE), USER_OWNER, USER_RW);
-    verifyDenied(new AccessWriteAction(TEST_TABLE), USER_RO, USER_NONE);
-
-    loadData();
-    verifyRows(TEST_TABLE);
-
-    String snapshotName1 = TEST_UTIL.getRandomUUID().toString();
-    admin.snapshot(snapshotName1, TEST_TABLE);
-
-    // clone snapshot with restoreAcl true.
-    TableName tableName1 = TableName.valueOf(TEST_UTIL.getRandomUUID().toString());
-    admin.cloneSnapshot(snapshotName1, tableName1, true);
-    verifyRows(tableName1);
-    verifyAllowed(new AccessReadAction(tableName1), USER_OWNER, USER_RO, USER_RW);
-    verifyDenied(new AccessReadAction(tableName1), USER_NONE);
-    verifyAllowed(new AccessWriteAction(tableName1), USER_OWNER, USER_RW);
-    verifyDenied(new AccessWriteAction(tableName1), USER_RO, USER_NONE);
-
-    // clone snapshot with restoreAcl false.
-    TableName tableName2 = TableName.valueOf(TEST_UTIL.getRandomUUID().toString());
-    admin.cloneSnapshot(snapshotName1, tableName2, false);
-    verifyRows(tableName2);
-    verifyAllowed(new AccessReadAction(tableName2), USER_OWNER);
-    verifyDenied(new AccessReadAction(tableName2), USER_NONE, USER_RO, USER_RW);
-    verifyAllowed(new AccessWriteAction(tableName2), USER_OWNER);
-    verifyDenied(new AccessWriteAction(tableName2), USER_RO, USER_RW, USER_NONE);
-
-    // remove read permission for USER_RO.
-    revokeFromTable(TEST_UTIL, USER_RO.getShortName(), TEST_TABLE, TEST_FAMILY, null,
-      Permission.Action.READ);
-    verifyAllowed(new AccessReadAction(TEST_TABLE), USER_OWNER, USER_RW);
-    verifyDenied(new AccessReadAction(TEST_TABLE), USER_RO, USER_NONE);
-    verifyAllowed(new AccessWriteAction(TEST_TABLE), USER_OWNER, USER_RW);
-    verifyDenied(new AccessWriteAction(TEST_TABLE), USER_RO, USER_NONE);
-
-    // restore snapshot with restoreAcl false.
-    admin.disableTable(TEST_TABLE);
-    admin.restoreSnapshot(snapshotName1, false, false);
-    admin.enableTable(TEST_TABLE);
-    verifyAllowed(new AccessReadAction(TEST_TABLE), USER_OWNER, USER_RW);
-    verifyDenied(new AccessReadAction(TEST_TABLE), USER_RO, USER_NONE);
-    verifyAllowed(new AccessWriteAction(TEST_TABLE), USER_OWNER, USER_RW);
-    verifyDenied(new AccessWriteAction(TEST_TABLE), USER_RO, USER_NONE);
-
-    // restore snapshot with restoreAcl true.
-    admin.disableTable(TEST_TABLE);
-    admin.restoreSnapshot(snapshotName1, false, true);
-    admin.enableTable(TEST_TABLE);
-    verifyAllowed(new AccessReadAction(TEST_TABLE), USER_OWNER, USER_RO, USER_RW);
-    verifyDenied(new AccessReadAction(TEST_TABLE), USER_NONE);
-    verifyAllowed(new AccessWriteAction(TEST_TABLE), USER_OWNER, USER_RW);
-    verifyDenied(new AccessWriteAction(TEST_TABLE), USER_RO, USER_NONE);
+  @Override
+  protected void restoreSnapshot(String snapshotName, boolean restoreAcl) throws Exception {
+    TEST_UTIL.getAdmin().restoreSnapshot(snapshotName, false, restoreAcl);
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAclAsyncAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAclAsyncAdmin.java
new file mode 100644
index 0000000..792f0e3
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAclAsyncAdmin.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.ClassRule;
+import org.junit.experimental.categories.Category;
+
+@Category({ MediumTests.class, ClientTests.class })
+public class TestSnapshotWithAclAsyncAdmin extends SnapshotWithAclTestBase {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestSnapshotWithAclAsyncAdmin.class);
+
+  @Override
+  protected void snapshot(String snapshotName, TableName tableName) throws Exception {
+    try (AsyncConnection conn =
+      ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get()) {
+      conn.getAdmin().snapshot(snapshotName, tableName).get();
+    }
+  }
+
+  @Override
+  protected void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl)
+      throws Exception {
+    try (AsyncConnection conn =
+      ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get()) {
+      conn.getAdmin().cloneSnapshot(snapshotName, tableName, restoreAcl).get();
+    }
+  }
+
+  @Override
+  protected void restoreSnapshot(String snapshotName, boolean restoreAcl) throws Exception {
+    try (AsyncConnection conn =
+      ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get()) {
+      conn.getAdmin().restoreSnapshot(snapshotName, false, restoreAcl).get();
+    }
+  }
+}
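
For reference, a minimal sketch of how client code might call the acl-aware snapshot methods that HBASE-22007 adds to AsyncAdmin. Only the cloneSnapshot/restoreSnapshot signatures come from the patch above; the configuration, snapshot name and table names are illustrative assumptions, and the sketch assumes "snap-1" was taken from "source_table".

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotAclExample {
      public static void main(String[] args) throws Exception {
        try (AsyncConnection conn =
            ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
          AsyncAdmin admin = conn.getAdmin();
          // Clone the snapshot into a new table, carrying the snapshot's ACLs over.
          admin.cloneSnapshot("snap-1", TableName.valueOf("cloned_table"), true).get();
          // Restore the source table in place: disable, restore (no fail-safe snapshot,
          // restoreAcl = true), then re-enable, mirroring the test flow above.
          admin.disableTable(TableName.valueOf("source_table")).get();
          admin.restoreSnapshot("snap-1", false, true).get();
          admin.enableTable(TableName.valueOf("source_table")).get();
        }
      }
    }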


[hbase] 09/24: Revert "HBASE-21874 Bucket cache on Persistent memory"

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit c19280b4267953f0db321f3e5276976eb8e39ee9
Author: Sean Busbey <bu...@apache.org>
AuthorDate: Thu Mar 7 08:49:41 2019 -0600

    Revert "HBASE-21874 Bucket cache on Persistent memory"
    
    This reverts commit 763202d48eaf8163f26840ea206a63b125aa3770.
    
    bad signed-off-by line
---
 hbase-common/src/main/resources/hbase-default.xml  |  6 +-
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 10 +--
 .../hfile/bucket/ExclusiveMemoryMmapIOEngine.java  | 50 ---------------
 .../{FileMmapIOEngine.java => FileMmapEngine.java} | 73 ++++++++++++----------
 .../io/hfile/bucket/SharedMemoryMmapIOEngine.java  | 64 -------------------
 ...moryMmapEngine.java => TestFileMmapEngine.java} |  8 +--
 src/main/asciidoc/_chapters/architecture.adoc      |  2 -
 src/main/asciidoc/_chapters/hbase-default.adoc     | 11 ++--
 8 files changed, 50 insertions(+), 174 deletions(-)

diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index c8ab48a..ad6f645 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -925,10 +925,8 @@ possible configurations would overwhelm and obscure the important.
     <name>hbase.bucketcache.ioengine</name>
     <value></value>
     <description>Where to store the contents of the bucketcache. One of: offheap,
-    file, files, mmap or pmem. If a file or files, set it to file(s):PATH_TO_FILE.
-    mmap means the content will be in an mmaped file. Use mmap:PATH_TO_FILE. 'pmem'
-    is bucket cache over a file on the persistent memory device.
-    Use pmem:PATH_TO_FILE.
+    file, files or mmap. If a file or files, set it to file(s):PATH_TO_FILE.
+    mmap means the content will be in an mmaped file. Use mmap:PATH_TO_FILE.
     See http://hbase.apache.org/book.html#offheap.blockcache for more information.
     </description>
   </property>
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 470c4f7..5a21bad 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -379,15 +379,7 @@ public class BucketCache implements BlockCache, HeapSize {
     } else if (ioEngineName.startsWith("offheap")) {
       return new ByteBufferIOEngine(capacity);
     } else if (ioEngineName.startsWith("mmap:")) {
-      return new ExclusiveMemoryMmapIOEngine(ioEngineName.substring(5), capacity);
-    } else if (ioEngineName.startsWith("pmem:")) {
-      // This mode of bucket cache creates an IOEngine over a file on the persistent memory
-      // device. Since the persistent memory device has its own address space the contents
-      // mapped to this address space does not get swapped out like in the case of mmapping
-      // on to DRAM. Hence the cells created out of the hfile blocks in the pmem bucket cache
-      // can be directly referred to without having to copy them onheap. Once the RPC is done,
-      // the blocks can be returned back as in case of ByteBufferIOEngine.
-      return new SharedMemoryMmapIOEngine(ioEngineName.substring(5), capacity);
+      return new FileMmapEngine(ioEngineName.substring(5), capacity);
     } else {
       throw new IllegalArgumentException(
           "Don't understand io engine name for cache- prefix with file:, files:, mmap: or offheap");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java
deleted file mode 100644
index 8b024f0..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hadoop.hbase.io.hfile.bucket;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-
-import org.apache.hadoop.hbase.io.hfile.Cacheable;
-import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-import org.apache.hadoop.hbase.nio.SingleByteBuff;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * IO engine that stores data to a file on the local block device using memory mapping
- * mechanism
- */
-@InterfaceAudience.Private
-public class ExclusiveMemoryMmapIOEngine extends FileMmapIOEngine {
-  static final Logger LOG = LoggerFactory.getLogger(ExclusiveMemoryMmapIOEngine.class);
-
-  public ExclusiveMemoryMmapIOEngine(String filePath, long capacity) throws IOException {
-    super(filePath, capacity);
-  }
-
-  @Override
-  public Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer)
-      throws IOException {
-    byte[] dst = new byte[length];
-    bufferArray.getMultiple(offset, length, dst);
-    return deserializer.deserialize(new SingleByteBuff(ByteBuffer.wrap(dst)), true,
-      MemoryType.EXCLUSIVE);
-  }
-}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapEngine.java
similarity index 66%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapEngine.java
index 9580efe..82f42cd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapEngine.java
@@ -1,19 +1,20 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
+ * Copyright The Apache Software Foundation
  *
- *     http://www.apache.org/licenses/LICENSE-2.0
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
  */
 package org.apache.hadoop.hbase.io.hfile.bucket;
 
@@ -23,31 +24,33 @@ import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.io.hfile.Cacheable;
+import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
 import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
 import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.nio.SingleByteBuff;
 import org.apache.hadoop.hbase.util.ByteBufferAllocator;
 import org.apache.hadoop.hbase.util.ByteBufferArray;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
- * IO engine that stores data to a file on the specified file system using memory mapping
+ * IO engine that stores data to a file on the local file system using memory mapping
  * mechanism
  */
 @InterfaceAudience.Private
-public abstract class FileMmapIOEngine implements IOEngine {
-  static final Logger LOG = LoggerFactory.getLogger(FileMmapIOEngine.class);
+public class FileMmapEngine implements IOEngine {
+  static final Logger LOG = LoggerFactory.getLogger(FileMmapEngine.class);
 
-  protected final String path;
-  protected long size;
-  protected ByteBufferArray bufferArray;
+  private final String path;
+  private long size;
+  private ByteBufferArray bufferArray;
   private final FileChannel fileChannel;
   private RandomAccessFile raf = null;
 
-  public FileMmapIOEngine(String filePath, long capacity) throws IOException {
+  public FileMmapEngine(String filePath, long capacity) throws IOException {
     this.path = filePath;
     this.size = capacity;
     long fileSize = 0;
@@ -61,15 +64,13 @@ public abstract class FileMmapIOEngine implements IOEngine {
       LOG.error("Can't create bucket cache file " + filePath, fex);
       throw fex;
     } catch (IOException ioex) {
-      LOG.error(
-        "Can't extend bucket cache file; insufficient space for " + StringUtils.byteDesc(fileSize),
-        ioex);
+      LOG.error("Can't extend bucket cache file; insufficient space for "
+          + StringUtils.byteDesc(fileSize), ioex);
       shutdown();
       throw ioex;
     }
     ByteBufferAllocator allocator = new ByteBufferAllocator() {
       AtomicInteger pos = new AtomicInteger(0);
-
       @Override
       public ByteBuffer allocate(long size) throws IOException {
         ByteBuffer buffer = fileChannel.map(java.nio.channels.FileChannel.MapMode.READ_WRITE,
@@ -86,8 +87,8 @@ public abstract class FileMmapIOEngine implements IOEngine {
 
   @Override
   public String toString() {
-    return "ioengine=" + this.getClass().getSimpleName() + ", path=" + this.path + ", size="
-        + String.format("%,d", this.size);
+    return "ioengine=" + this.getClass().getSimpleName() + ", path=" + this.path +
+      ", size=" + String.format("%,d", this.size);
   }
 
   /**
@@ -96,13 +97,17 @@ public abstract class FileMmapIOEngine implements IOEngine {
    */
   @Override
   public boolean isPersistent() {
-    // TODO : HBASE-21981 needed for persistence to really work
     return true;
   }
 
   @Override
-  public abstract Cacheable read(long offset, int length,
-      CacheableDeserializer<Cacheable> deserializer) throws IOException;
+  public Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer)
+      throws IOException {
+    byte[] dst = new byte[length];
+    bufferArray.getMultiple(offset, length, dst);
+    return deserializer.deserialize(new SingleByteBuff(ByteBuffer.wrap(dst)), true,
+      MemoryType.EXCLUSIVE);
+  }
 
   /**
    * Transfers data from the given byte buffer to file
@@ -114,7 +119,7 @@ public abstract class FileMmapIOEngine implements IOEngine {
   public void write(ByteBuffer srcBuffer, long offset) throws IOException {
     assert srcBuffer.hasArray();
     bufferArray.putMultiple(offset, srcBuffer.remaining(), srcBuffer.array(),
-      srcBuffer.arrayOffset());
+        srcBuffer.arrayOffset());
   }
 
   @Override
@@ -122,9 +127,9 @@ public abstract class FileMmapIOEngine implements IOEngine {
     // This singleByteBuff can be considered to be array backed
     assert srcBuffer.hasArray();
     bufferArray.putMultiple(offset, srcBuffer.remaining(), srcBuffer.array(),
-      srcBuffer.arrayOffset());
-  }
+        srcBuffer.arrayOffset());
 
+  }
   /**
    * Sync the data to file after writing
    * @throws IOException
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java
deleted file mode 100644
index b6a7a57..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io.hfile.bucket;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.io.hfile.Cacheable;
-import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-import org.apache.hadoop.hbase.nio.ByteBuff;
-import org.apache.yetus.audience.InterfaceAudience;
-
-/**
- * IO engine that stores data in pmem devices such as DCPMM. This engine also mmaps the file from
- * the given path. But note that this path has to be a path on the pmem device so that when mmapped
- * the file's address is mapped to the Pmem's address space and not in the DRAM. Since this address
- * space is exclusive for the Pmem device there is no swapping out of the mmapped contents that
- * generally happens when DRAM's free space is not enough to hold the specified file's mmapped
- * contents. This gives us the option of using the {@code MemoryType#SHARED} type when serving the
- * data from this pmem address space. We need not copy the blocks to the onheap space as we need to
- * do for the case of {@code ExclusiveMemoryMmapIOEngine}.
- */
-@InterfaceAudience.Private
-public class SharedMemoryMmapIOEngine extends FileMmapIOEngine {
-
-  // TODO this will support only one path over Pmem. To make use of multiple Pmem devices mounted,
-  // we need to support multiple paths like files IOEngine. Support later.
-  public SharedMemoryMmapIOEngine(String filePath, long capacity) throws IOException {
-    super(filePath, capacity);
-  }
-
-  @Override
-  public boolean usesSharedMemory() {
-    return true;
-  }
-
-  @Override
-  public Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer)
-      throws IOException {
-    ByteBuff dstBuffer = bufferArray.asSubByteBuff(offset, length);
-    // Here the buffer that is created directly refers to the buffer in the actual buckets.
-    // When any cell is referring to the blocks created out of these buckets then it means that
-    // those cells are referring to a shared memory area which if evicted by the BucketCache would
-    // lead to corruption of results. Hence we set the type of the buffer as SHARED_MEMORY
-    // so that the readers using this block are aware of this fact and do the necessary action
-    // to prevent eviction till the results are either consumed or copied
-    return deserializer.deserialize(dstBuffer, true, MemoryType.SHARED);
-  }
-}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestExclusiveMemoryMmapEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileMmapEngine.java
similarity index 90%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestExclusiveMemoryMmapEngine.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileMmapEngine.java
index d0d8c8a..2748d80 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestExclusiveMemoryMmapEngine.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileMmapEngine.java
@@ -32,21 +32,21 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 /**
- * Basic test for {@link ExclusiveMemoryMmapIOEngine}
+ * Basic test for {@link FileMmapEngine}
  */
 @Category({IOTests.class, SmallTests.class})
-public class TestExclusiveMemoryMmapEngine {
+public class TestFileMmapEngine {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestExclusiveMemoryMmapEngine.class);
+      HBaseClassTestRule.forClass(TestFileMmapEngine.class);
 
   @Test
   public void testFileMmapEngine() throws IOException {
     int size = 2 * 1024 * 1024; // 2 MB
     String filePath = "testFileMmapEngine";
     try {
-      ExclusiveMemoryMmapIOEngine fileMmapEngine = new ExclusiveMemoryMmapIOEngine(filePath, size);
+      FileMmapEngine fileMmapEngine = new FileMmapEngine(filePath, size);
       for (int i = 0; i < 50; i++) {
         int len = (int) Math.floor(Math.random() * 100);
         long offset = (long) Math.floor(Math.random() * size % (size - len));
diff --git a/src/main/asciidoc/_chapters/architecture.adoc b/src/main/asciidoc/_chapters/architecture.adoc
index 9a5eba9..c250aa8 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -733,8 +733,6 @@ See configurations, sizings, current usage, time-in-the-cache, and even detail o
 
 `LruBlockCache` is the original implementation, and is entirely within the Java heap.
 `BucketCache` is optional and mainly intended for keeping block cache data off-heap, although `BucketCache` can also be a file-backed cache.
- In file-backed we can either use it in the file mode or the mmaped mode.
- We also have pmem mode where the bucket cache resides on the persistent memory device.
 
 When you enable BucketCache, you are enabling a two tier caching system. We used to describe the
 tiers as "L1" and "L2" but have deprecated this terminology as of hbase-2.0.0. The "L1" cache referred to an
diff --git a/src/main/asciidoc/_chapters/hbase-default.adoc b/src/main/asciidoc/_chapters/hbase-default.adoc
index ccd5f6f..f809f28 100644
--- a/src/main/asciidoc/_chapters/hbase-default.adoc
+++ b/src/main/asciidoc/_chapters/hbase-default.adoc
@@ -1197,13 +1197,10 @@ When the size of a leaf-level, intermediate-level, or root-level
 *`hbase.bucketcache.ioengine`*::
 +
 .Description
-Where to store the contents of the bucketcache. One of: offheap,
-    file, files, mmap or pmem. If a file or files, set it to file(s):PATH_TO_FILE.
-    mmap means the content will be in an mmaped file. Use mmap:PATH_TO_FILE.
-    'pmem' is bucket cache over a file on the persistent memory device.
-    Use pmem:PATH_TO_FILE.
-    See https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.html
-    for more information.
+Where to store the contents of the bucketcache. One of: onheap,
+      offheap, or file. If a file, set it to file:PATH_TO_FILE.
+      See https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.html
+      for more information.
 
 +
 .Default


[hbase] 08/24: Revert "SE-22006 Fix branch-2.1 findbugs warning; causes nightly show as failed."

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 1b1352aad43785fa1101683b27363ed8c41389c6
Author: Sean Busbey <bu...@apache.org>
AuthorDate: Thu Mar 7 08:49:07 2019 -0600

    Revert "SE-22006 Fix branch-2.1 findbugs warning; causes nightly show as failed."
    
    This reverts commit 18d02ffd5df01433d9e15c7a84fb225adf2a22e0.
    
    bad commit message
---
 .../java/org/apache/hadoop/hbase/regionserver/HRegionServer.java     | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index e407285..f983882 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1613,8 +1613,9 @@ public class HRegionServer extends HasThread implements
           MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT);
       int chunkSize = conf.getInt(MemStoreLAB.CHUNK_SIZE_KEY, MemStoreLAB.CHUNK_SIZE_DEFAULT);
       // init the chunkCreator
-      ChunkCreator.initialize(chunkSize, offheap, globalMemStoreSize, poolSizePercentage,
-        initialCountPercentage, this.hMemManager);
+      ChunkCreator chunkCreator =
+          ChunkCreator.initialize(chunkSize, offheap, globalMemStoreSize, poolSizePercentage,
+      initialCountPercentage, this.hMemManager);
     }
   }
 


[hbase] 15/24: HBASE-22010 Upgrade to 2.2 section header can't use spaces.

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit aa4d3b794244a7e8ad12512e339e30b8e3c9bd2c
Author: Sean Busbey <bu...@apache.org>
AuthorDate: Thu Mar 7 20:44:20 2019 -0600

    HBASE-22010 Upgrade to 2.2 section header can't use spaces.
    
    Signed-off-by: Guanghao Zhang <zg...@apache.org>
---
 src/main/asciidoc/_chapters/upgrading.adoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/main/asciidoc/_chapters/upgrading.adoc b/src/main/asciidoc/_chapters/upgrading.adoc
index 2a33e42..d8aada4 100644
--- a/src/main/asciidoc/_chapters/upgrading.adoc
+++ b/src/main/asciidoc/_chapters/upgrading.adoc
@@ -314,7 +314,7 @@ Quitting...
 
 == Upgrade Paths
 
-[[upgrade 2.2]]
+[[upgrade2.2]]
 === Upgrade from 2.0 or 2.1 to 2.2+
 
 HBase 2.2+ uses a new Procedure form for assigning/unassigning/moving Regions. It does not process HBase 2.1 and 2.0's Unassign/Assign Procedure types. Upgrade requires that we first drain the Master Procedure Store of old style Procedures before starting the new 2.2 Master. So you need to make sure that before you kill the old version (2.0 or 2.1) Master, there is no region in transition. And once the new version (2.2+) Master is up, you can do a rolling upgrade of the RegionServers one by one.
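
Before that cut-over, a hedged pre-flight check (assuming the 2.x ClusterMetrics client API; this check is not part of the doc change itself) for confirming there is no region in transition before stopping the old Master:

    import java.util.EnumSet;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RegionsInTransitionCheck {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
            Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics =
              admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.REGIONS_IN_TRANSITION));
          // Expect an empty list before killing the 2.0/2.1 Master.
          System.out.println("regions in transition: "
              + metrics.getRegionStatesInTransition().size());
        }
      }
    }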


[hbase] 07/24: HBASE-22000 Deprecated isTableAvailable with splitKeys

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 06ca2ce59429392c448228eb8f7936b03a03cee1
Author: johnhomsea <51...@qq.com>
AuthorDate: Wed Mar 6 22:55:44 2019 +0800

    HBASE-22000 Deprecated isTableAvailable with splitKeys
    
    Signed-off-by: Duo Zhang <zh...@apache.org>
---
 .../src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java        | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 7284ae4..94dc1e0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -219,7 +219,9 @@ public interface AsyncAdmin {
    * {@link CompletableFuture}.
    * @param tableName name of table to check
    * @param splitKeys keys to check if the table has been created with all split keys
+   * @deprecated Since 2.2.0. Will be removed in 3.0.0. Use {@link #isTableAvailable(TableName)}
    */
+  @Deprecated
   CompletableFuture<Boolean> isTableAvailable(TableName tableName, byte[][] splitKeys);
 
   /**
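
A small sketch of the replacement call the deprecation note points to; the connection setup and table name are illustrative assumptions:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TableAvailableCheck {
      public static void main(String[] args) throws Exception {
        try (AsyncConnection conn =
            ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
          // Prefer the single-argument overload; the splitKeys variant is deprecated in 2.2.0.
          boolean available =
              conn.getAdmin().isTableAvailable(TableName.valueOf("my_table")).get();
          System.out.println("table available: " + available);
        }
      }
    }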


[hbase] 04/24: HBASE-21874 Bucket cache on Persistent memory

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 9ffb9c539c0b1833335630a9884025ad3bd2e3bc
Author: ramkrishna <ra...@apache.org>
AuthorDate: Wed Mar 6 22:09:51 2019 +0530

    HBASE-21874 Bucket cache on Persistent memory
    
    Signed-off-by: Anoop John <an...@gmail.com>, Sean Busbey
    <bu...@apache.org>
---
 hbase-common/src/main/resources/hbase-default.xml  |  6 +-
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 10 ++-
 .../hfile/bucket/ExclusiveMemoryMmapIOEngine.java  | 50 +++++++++++++++
 .../{FileMmapEngine.java => FileMmapIOEngine.java} | 73 ++++++++++------------
 .../io/hfile/bucket/SharedMemoryMmapIOEngine.java  | 64 +++++++++++++++++++
 ...ine.java => TestExclusiveMemoryMmapEngine.java} |  8 +--
 src/main/asciidoc/_chapters/architecture.adoc      |  2 +
 src/main/asciidoc/_chapters/hbase-default.adoc     | 11 ++--
 8 files changed, 174 insertions(+), 50 deletions(-)

diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index ad6f645..c8ab48a 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -925,8 +925,10 @@ possible configurations would overwhelm and obscure the important.
     <name>hbase.bucketcache.ioengine</name>
     <value></value>
     <description>Where to store the contents of the bucketcache. One of: offheap,
-    file, files or mmap. If a file or files, set it to file(s):PATH_TO_FILE.
-    mmap means the content will be in an mmaped file. Use mmap:PATH_TO_FILE.
+    file, files, mmap or pmem. If a file or files, set it to file(s):PATH_TO_FILE.
+    mmap means the content will be in an mmaped file. Use mmap:PATH_TO_FILE. 'pmem'
+    is bucket cache over a file on the persistent memory device.
+    Use pmem:PATH_TO_FILE.
     See http://hbase.apache.org/book.html#offheap.blockcache for more information.
     </description>
   </property>
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 5a21bad..470c4f7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -379,7 +379,15 @@ public class BucketCache implements BlockCache, HeapSize {
     } else if (ioEngineName.startsWith("offheap")) {
       return new ByteBufferIOEngine(capacity);
     } else if (ioEngineName.startsWith("mmap:")) {
-      return new FileMmapEngine(ioEngineName.substring(5), capacity);
+      return new ExclusiveMemoryMmapIOEngine(ioEngineName.substring(5), capacity);
+    } else if (ioEngineName.startsWith("pmem:")) {
+      // This mode of bucket cache creates an IOEngine over a file on the persistent memory
+      // device. Since the persistent memory device has its own address space the contents
+      // mapped to this address space does not get swapped out like in the case of mmapping
+      // on to DRAM. Hence the cells created out of the hfile blocks in the pmem bucket cache
+      // can be directly referred to without having to copy them onheap. Once the RPC is done,
+      // the blocks can be returned back as in case of ByteBufferIOEngine.
+      return new SharedMemoryMmapIOEngine(ioEngineName.substring(5), capacity);
     } else {
       throw new IllegalArgumentException(
           "Don't understand io engine name for cache- prefix with file:, files:, mmap: or offheap");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java
new file mode 100644
index 0000000..8b024f0
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile.bucket;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.hbase.io.hfile.Cacheable;
+import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
+import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
+import org.apache.hadoop.hbase.nio.SingleByteBuff;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * IO engine that stores data to a file on the local block device using memory mapping
+ * mechanism
+ */
+@InterfaceAudience.Private
+public class ExclusiveMemoryMmapIOEngine extends FileMmapIOEngine {
+  static final Logger LOG = LoggerFactory.getLogger(ExclusiveMemoryMmapIOEngine.class);
+
+  public ExclusiveMemoryMmapIOEngine(String filePath, long capacity) throws IOException {
+    super(filePath, capacity);
+  }
+
+  @Override
+  public Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer)
+      throws IOException {
+    byte[] dst = new byte[length];
+    bufferArray.getMultiple(offset, length, dst);
+    return deserializer.deserialize(new SingleByteBuff(ByteBuffer.wrap(dst)), true,
+      MemoryType.EXCLUSIVE);
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java
similarity index 66%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapEngine.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java
index 82f42cd..9580efe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java
@@ -1,20 +1,19 @@
 /**
- * Copyright The Apache Software Foundation
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
  *
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
 package org.apache.hadoop.hbase.io.hfile.bucket;
 
@@ -24,33 +23,31 @@ import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.io.hfile.Cacheable;
-import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
 import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
 import org.apache.hadoop.hbase.nio.ByteBuff;
-import org.apache.hadoop.hbase.nio.SingleByteBuff;
 import org.apache.hadoop.hbase.util.ByteBufferAllocator;
 import org.apache.hadoop.hbase.util.ByteBufferArray;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
- * IO engine that stores data to a file on the local file system using memory mapping
+ * IO engine that stores data to a file on the specified file system using memory mapping
  * mechanism
  */
 @InterfaceAudience.Private
-public class FileMmapEngine implements IOEngine {
-  static final Logger LOG = LoggerFactory.getLogger(FileMmapEngine.class);
+public abstract class FileMmapIOEngine implements IOEngine {
+  static final Logger LOG = LoggerFactory.getLogger(FileMmapIOEngine.class);
 
-  private final String path;
-  private long size;
-  private ByteBufferArray bufferArray;
+  protected final String path;
+  protected long size;
+  protected ByteBufferArray bufferArray;
   private final FileChannel fileChannel;
   private RandomAccessFile raf = null;
 
-  public FileMmapEngine(String filePath, long capacity) throws IOException {
+  public FileMmapIOEngine(String filePath, long capacity) throws IOException {
     this.path = filePath;
     this.size = capacity;
     long fileSize = 0;
@@ -64,13 +61,15 @@ public class FileMmapEngine implements IOEngine {
       LOG.error("Can't create bucket cache file " + filePath, fex);
       throw fex;
     } catch (IOException ioex) {
-      LOG.error("Can't extend bucket cache file; insufficient space for "
-          + StringUtils.byteDesc(fileSize), ioex);
+      LOG.error(
+        "Can't extend bucket cache file; insufficient space for " + StringUtils.byteDesc(fileSize),
+        ioex);
       shutdown();
       throw ioex;
     }
     ByteBufferAllocator allocator = new ByteBufferAllocator() {
       AtomicInteger pos = new AtomicInteger(0);
+
       @Override
       public ByteBuffer allocate(long size) throws IOException {
         ByteBuffer buffer = fileChannel.map(java.nio.channels.FileChannel.MapMode.READ_WRITE,
@@ -87,8 +86,8 @@ public class FileMmapEngine implements IOEngine {
 
   @Override
   public String toString() {
-    return "ioengine=" + this.getClass().getSimpleName() + ", path=" + this.path +
-      ", size=" + String.format("%,d", this.size);
+    return "ioengine=" + this.getClass().getSimpleName() + ", path=" + this.path + ", size="
+        + String.format("%,d", this.size);
   }
 
   /**
@@ -97,17 +96,13 @@ public class FileMmapEngine implements IOEngine {
    */
   @Override
   public boolean isPersistent() {
+    // TODO : HBASE-21981 needed for persistence to really work
     return true;
   }
 
   @Override
-  public Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer)
-      throws IOException {
-    byte[] dst = new byte[length];
-    bufferArray.getMultiple(offset, length, dst);
-    return deserializer.deserialize(new SingleByteBuff(ByteBuffer.wrap(dst)), true,
-      MemoryType.EXCLUSIVE);
-  }
+  public abstract Cacheable read(long offset, int length,
+      CacheableDeserializer<Cacheable> deserializer) throws IOException;
 
   /**
    * Transfers data from the given byte buffer to file
@@ -119,7 +114,7 @@ public class FileMmapEngine implements IOEngine {
   public void write(ByteBuffer srcBuffer, long offset) throws IOException {
     assert srcBuffer.hasArray();
     bufferArray.putMultiple(offset, srcBuffer.remaining(), srcBuffer.array(),
-        srcBuffer.arrayOffset());
+      srcBuffer.arrayOffset());
   }
 
   @Override
@@ -127,9 +122,9 @@ public class FileMmapEngine implements IOEngine {
     // This singleByteBuff can be considered to be array backed
     assert srcBuffer.hasArray();
     bufferArray.putMultiple(offset, srcBuffer.remaining(), srcBuffer.array(),
-        srcBuffer.arrayOffset());
-
+      srcBuffer.arrayOffset());
   }
+
   /**
    * Sync the data to file after writing
    * @throws IOException
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java
new file mode 100644
index 0000000..b6a7a57
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile.bucket;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.io.hfile.Cacheable;
+import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
+import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * IO engine that stores data in pmem devices such as DCPMM. This engine also mmaps the file from
+ * the given path. But note that this path has to be a path on the pmem device so that when mmapped
+ * the file's address is mapped to the Pmem's address space and not in the DRAM. Since this address
+ * space is exclusive for the Pmem device there is no swapping out of the mmapped contents that
+ * generally happens when DRAM's free space is not enough to hold the specified file's mmapped
+ * contents. This gives us the option of using the {@code MemoryType#SHARED} type when serving the
+ * data from this pmem address space. We need not copy the blocks to the onheap space as we need to
+ * do for the case of {@code ExclusiveMemoryMmapIOEngine}.
+ */
+@InterfaceAudience.Private
+public class SharedMemoryMmapIOEngine extends FileMmapIOEngine {
+
+  // TODO this will support only one path over Pmem. To make use of multiple Pmem devices mounted,
+  // we need to support multiple paths like files IOEngine. Support later.
+  public SharedMemoryMmapIOEngine(String filePath, long capacity) throws IOException {
+    super(filePath, capacity);
+  }
+
+  @Override
+  public boolean usesSharedMemory() {
+    return true;
+  }
+
+  @Override
+  public Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer)
+      throws IOException {
+    ByteBuff dstBuffer = bufferArray.asSubByteBuff(offset, length);
+    // Here the buffer that is created directly refers to the buffer in the actual buckets.
+    // When any cell is referring to the blocks created out of these buckets then it means that
+    // those cells are referring to a shared memory area which if evicted by the BucketCache would
+    // lead to corruption of results. Hence we set the type of the buffer as SHARED_MEMORY
+    // so that the readers using this block are aware of this fact and do the necessary action
+    // to prevent eviction till the results are either consumed or copied
+    return deserializer.deserialize(dstBuffer, true, MemoryType.SHARED);
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileMmapEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestExclusiveMemoryMmapEngine.java
similarity index 90%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileMmapEngine.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestExclusiveMemoryMmapEngine.java
index 2748d80..d0d8c8a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileMmapEngine.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestExclusiveMemoryMmapEngine.java
@@ -32,21 +32,21 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 /**
- * Basic test for {@link FileMmapEngine}
+ * Basic test for {@link ExclusiveMemoryMmapIOEngine}
  */
 @Category({IOTests.class, SmallTests.class})
-public class TestFileMmapEngine {
+public class TestExclusiveMemoryMmapEngine {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestFileMmapEngine.class);
+      HBaseClassTestRule.forClass(TestExclusiveMemoryMmapEngine.class);
 
   @Test
   public void testFileMmapEngine() throws IOException {
     int size = 2 * 1024 * 1024; // 2 MB
     String filePath = "testFileMmapEngine";
     try {
-      FileMmapEngine fileMmapEngine = new FileMmapEngine(filePath, size);
+      ExclusiveMemoryMmapIOEngine fileMmapEngine = new ExclusiveMemoryMmapIOEngine(filePath, size);
       for (int i = 0; i < 50; i++) {
         int len = (int) Math.floor(Math.random() * 100);
         long offset = (long) Math.floor(Math.random() * size % (size - len));
diff --git a/src/main/asciidoc/_chapters/architecture.adoc b/src/main/asciidoc/_chapters/architecture.adoc
index c250aa8..9a5eba9 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -733,6 +733,8 @@ See configurations, sizings, current usage, time-in-the-cache, and even detail o
 
 `LruBlockCache` is the original implementation, and is entirely within the Java heap.
 `BucketCache` is optional and mainly intended for keeping block cache data off-heap, although `BucketCache` can also be a file-backed cache.
+ In file-backed mode the cache can use either a plain file or an mmapped file.
+ There is also a pmem mode, where the bucket cache resides on a persistent memory device.
 
 When you enable BucketCache, you are enabling a two tier caching system. We used to describe the
 tiers as "L1" and "L2" but have deprecated this terminology as of hbase-2.0.0. The "L1" cache referred to an
diff --git a/src/main/asciidoc/_chapters/hbase-default.adoc b/src/main/asciidoc/_chapters/hbase-default.adoc
index f809f28..ccd5f6f 100644
--- a/src/main/asciidoc/_chapters/hbase-default.adoc
+++ b/src/main/asciidoc/_chapters/hbase-default.adoc
@@ -1197,10 +1197,13 @@ When the size of a leaf-level, intermediate-level, or root-level
 *`hbase.bucketcache.ioengine`*::
 +
 .Description
-Where to store the contents of the bucketcache. One of: onheap,
-      offheap, or file. If a file, set it to file:PATH_TO_FILE.
-      See https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.html
-      for more information.
+Where to store the contents of the bucketcache. One of: offheap,
+    file, files, mmap or pmem. If a file or files, set it to file(s):PATH_TO_FILE.
+    mmap means the content will be in an mmapped file. Use mmap:PATH_TO_FILE.
+    'pmem' is bucket cache over a file on the persistent memory device.
+    Use pmem:PATH_TO_FILE.
+    See https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.html
+    for more information.
 
 +
 .Default
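
To make the modes documented above concrete, here is a minimal Java sketch of how they might be selected through the client/server Configuration; it is illustrative only. The paths are placeholders, and hbase.bucketcache.size is assumed here as the companion sizing property rather than taken from this hunk.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class BucketCacheIoEngineExamples {

  // Bucket cache over a file on a persistent memory device; the DAX-mounted path is a placeholder.
  public static Configuration pmemBackedCache() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.bucketcache.ioengine", "pmem:/mnt/pmem0/bucketcache");
    conf.set("hbase.bucketcache.size", "16384"); // capacity in MB (assumed companion property)
    return conf;
  }

  // mmapped file-backed bucket cache, per the mmap:PATH_TO_FILE form described above.
  public static Configuration mmapBackedCache() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.bucketcache.ioengine", "mmap:/data/hbase/bucketcache");
    return conf;
  }
}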


[hbase] 02/24: HBASE-21871 Added support to specify a peer table name in VerifyReplication tool

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 1d5ae6f2f2480c250492499138c42869fc68290d
Author: subrat.mishra <su...@yahoo.com>
AuthorDate: Tue Mar 5 12:12:59 2019 +0530

    HBASE-21871 Added support to specify a peer table name in VerifyReplication tool
    
    Signed-off-by: Toshihiro Suzuki <br...@gmail.com>
---
 .../mapreduce/replication/VerifyReplication.java   |  28 ++++-
 .../hbase/replication/TestVerifyReplication.java   | 136 +++++++++++++++++++++
 .../hbase/replication/TestReplicationBase.java     |   6 +-
 3 files changed, 163 insertions(+), 7 deletions(-)

diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index 90fe874..5aed30d 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -114,6 +114,8 @@ public class VerifyReplication extends Configured implements Tool {
   String peerFSAddress = null;
   //Peer cluster HBase root dir location
   String peerHBaseRootAddress = null;
+  //Peer Table Name
+  String peerTableName = null;
 
 
   private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name";
@@ -192,8 +194,10 @@ public class VerifyReplication extends Configured implements Tool {
         Configuration peerConf = HBaseConfiguration.createClusterConf(conf,
             zkClusterKey, PEER_CONFIG_PREFIX);
 
+        String peerName = peerConf.get(NAME + ".peerTableName", tableName.getNameAsString());
+        TableName peerTableName = TableName.valueOf(peerName);
         replicatedConnection = ConnectionFactory.createConnection(peerConf);
-        replicatedTable = replicatedConnection.getTable(tableName);
+        replicatedTable = replicatedConnection.getTable(peerTableName);
         scan.setStartRow(value.getRow());
 
         byte[] endRow = null;
@@ -419,6 +423,11 @@ public class VerifyReplication extends Configured implements Tool {
       conf.set(NAME + ".peerQuorumAddress", peerQuorumAddress);
     }
 
+    if (peerTableName != null) {
+      LOG.info("Peer Table Name: " + peerTableName);
+      conf.set(NAME + ".peerTableName", peerTableName);
+    }
+
     conf.setInt(NAME + ".versions", versions);
     LOG.info("Number of version: " + versions);
 
@@ -617,6 +626,12 @@ public class VerifyReplication extends Configured implements Tool {
           continue;
         }
 
+        final String peerTableNameArgKey = "--peerTableName=";
+        if (cmd.startsWith(peerTableNameArgKey)) {
+          peerTableName = cmd.substring(peerTableNameArgKey.length());
+          continue;
+        }
+
         if (cmd.startsWith("--")) {
           printUsage("Invalid argument '" + cmd + "'");
           return false;
@@ -685,11 +700,11 @@ public class VerifyReplication extends Configured implements Tool {
     if (errorMsg != null && errorMsg.length() > 0) {
       System.err.println("ERROR: " + errorMsg);
     }
-    System.err.println("Usage: verifyrep [--starttime=X]" +
-        " [--endtime=Y] [--families=A] [--row-prefixes=B] [--delimiter=] [--recomparesleep=] " +
-        "[--batch=] [--verbose] [--sourceSnapshotName=P] [--sourceSnapshotTmpDir=Q] "
-      + "[--peerSnapshotName=R] [--peerSnapshotTmpDir=S] [--peerFSAddress=T] "
-      + "[--peerHBaseRootAddress=U] <peerid|peerQuorumAddress> <tablename>");
+    System.err.println("Usage: verifyrep [--starttime=X]"
+        + " [--endtime=Y] [--families=A] [--row-prefixes=B] [--delimiter=] [--recomparesleep=] "
+        + "[--batch=] [--verbose] [--peerTableName=] [--sourceSnapshotName=P] "
+        + "[--sourceSnapshotTmpDir=Q] [--peerSnapshotName=R] [--peerSnapshotTmpDir=S] "
+        + "[--peerFSAddress=T] [--peerHBaseRootAddress=U] <peerid|peerQuorumAddress> <tablename>");
     System.err.println();
     System.err.println("Options:");
     System.err.println(" starttime    beginning of the time range");
@@ -705,6 +720,7 @@ public class VerifyReplication extends Configured implements Tool {
     System.err.println(" recomparesleep   milliseconds to sleep before recompare row, " +
         "default value is 0 which disables the recompare.");
     System.err.println(" verbose      logs row keys of good rows");
+    System.err.println(" peerTableName  Peer Table Name");
     System.err.println(" sourceSnapshotName  Source Snapshot Name");
     System.err.println(" sourceSnapshotTmpDir Tmp location to restore source table snapshot");
     System.err.println(" peerSnapshotName  Peer Snapshot Name");
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java
index 8b52390..4ef1214 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java
@@ -25,7 +25,10 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.TreeMap;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -57,7 +60,9 @@ import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.mapreduce.Job;
+import org.junit.AfterClass;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Rule;
 import org.junit.Test;
@@ -78,6 +83,8 @@ public class TestVerifyReplication extends TestReplicationBase {
   private static final Logger LOG = LoggerFactory.getLogger(TestVerifyReplication.class);
 
   private static final String PEER_ID = "2";
+  private static final TableName peerTableName = TableName.valueOf("peerTest");
+  private static Table htable3;
 
   @Rule
   public TestName name = new TestName();
@@ -85,6 +92,22 @@ public class TestVerifyReplication extends TestReplicationBase {
   @Before
   public void setUp() throws Exception {
     cleanUp();
+    utility2.deleteTableData(peerTableName);
+  }
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TestReplicationBase.setUpBeforeClass();
+
+    TableDescriptor peerTable = TableDescriptorBuilder.newBuilder(peerTableName).setColumnFamily(
+                    ColumnFamilyDescriptorBuilder.newBuilder(noRepfamName).setMaxVersions(100)
+                            .build()).build();
+
+    Connection connection2 = ConnectionFactory.createConnection(conf2);
+    try (Admin admin2 = connection2.getAdmin()) {
+      admin2.createTable(peerTable, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
+    }
+    htable3 = connection2.getTable(peerTableName);
   }
 
   private void runVerifyReplication(String[] args, int expectedGoodRows, int expectedBadRows)
@@ -561,4 +584,117 @@ public class TestVerifyReplication extends TestReplicationBase {
     checkRestoreTmpDir(conf1, tmpPath1, 2);
     checkRestoreTmpDir(conf2, tmpPath2, 2);
   }
+
+  private static void runBatchCopyTest() throws Exception {
+    // normal Batch tests for htable1
+    loadData("", row, noRepfamName);
+
+    Scan scan1 = new Scan();
+    List<Put> puts = new ArrayList<>(NB_ROWS_IN_BATCH);
+    ResultScanner scanner1 = htable1.getScanner(scan1);
+    Result[] res1 = scanner1.next(NB_ROWS_IN_BATCH);
+    for (Result result : res1) {
+      Put put = new Put(result.getRow());
+      for (Cell cell : result.rawCells()) {
+        put.add(cell);
+      }
+      puts.add(put);
+    }
+    scanner1.close();
+    assertEquals(NB_ROWS_IN_BATCH, res1.length);
+
+    // Copy the data to htable3
+    htable3.put(puts);
+
+    Scan scan2 = new Scan();
+    ResultScanner scanner2 = htable3.getScanner(scan2);
+    Result[] res2 = scanner2.next(NB_ROWS_IN_BATCH);
+    scanner2.close();
+    assertEquals(NB_ROWS_IN_BATCH, res2.length);
+  }
+
+  @Test
+  public void testVerifyRepJobWithPeerTableName() throws Exception {
+    // Populate the tables with same data
+    runBatchCopyTest();
+
+    // with a peerTableName along with quorum address (a cluster key)
+    String[] args = new String[] { "--peerTableName=" + peerTableName.getNameAsString(),
+        utility2.getClusterKey(), tableName.getNameAsString() };
+    runVerifyReplication(args, NB_ROWS_IN_BATCH, 0);
+
+    utility2.deleteTableData(peerTableName);
+    runVerifyReplication(args, 0, NB_ROWS_IN_BATCH);
+  }
+
+  @Test
+  public void testVerifyRepJobWithPeerTableNameAndSnapshotSupport() throws Exception {
+    // Populate the tables with same data
+    runBatchCopyTest();
+
+    // Take source and target tables snapshot
+    Path rootDir = FSUtils.getRootDir(conf1);
+    FileSystem fs = rootDir.getFileSystem(conf1);
+    String sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
+    SnapshotTestingUtils.createSnapshotAndValidate(utility1.getAdmin(), tableName,
+            Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true);
+
+    // Take target snapshot
+    Path peerRootDir = FSUtils.getRootDir(conf2);
+    FileSystem peerFs = peerRootDir.getFileSystem(conf2);
+    String peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
+    SnapshotTestingUtils.createSnapshotAndValidate(utility2.getAdmin(), peerTableName,
+            Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true);
+
+    String peerFSAddress = peerFs.getUri().toString();
+    String tmpPath1 = utility1.getRandomDir().toString();
+    String tmpPath2 = "/tmp" + System.currentTimeMillis();
+
+    String[] args = new String[] { "--peerTableName=" + peerTableName.getNameAsString(),
+      "--sourceSnapshotName=" + sourceSnapshotName,
+      "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName,
+      "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress,
+      "--peerHBaseRootAddress=" + FSUtils.getRootDir(conf2), utility2.getClusterKey(),
+      tableName.getNameAsString() };
+    runVerifyReplication(args, NB_ROWS_IN_BATCH, 0);
+    checkRestoreTmpDir(conf1, tmpPath1, 1);
+    checkRestoreTmpDir(conf2, tmpPath2, 1);
+
+    Scan scan = new Scan();
+    ResultScanner rs = htable3.getScanner(scan);
+    Put put = null;
+    for (Result result : rs) {
+      put = new Put(result.getRow());
+      Cell firstVal = result.rawCells()[0];
+      put.addColumn(CellUtil.cloneFamily(firstVal), CellUtil.cloneQualifier(firstVal),
+              Bytes.toBytes("diff data"));
+      htable3.put(put);
+    }
+    Delete delete = new Delete(put.getRow());
+    htable3.delete(delete);
+
+    sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
+    SnapshotTestingUtils.createSnapshotAndValidate(utility1.getAdmin(), tableName,
+            Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true);
+
+    peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
+    SnapshotTestingUtils.createSnapshotAndValidate(utility2.getAdmin(), peerTableName,
+            Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true);
+
+    args = new String[] { "--peerTableName=" + peerTableName.getNameAsString(),
+      "--sourceSnapshotName=" + sourceSnapshotName,
+      "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName,
+      "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress,
+      "--peerHBaseRootAddress=" + FSUtils.getRootDir(conf2), utility2.getClusterKey(),
+      tableName.getNameAsString() };
+    runVerifyReplication(args, 0, NB_ROWS_IN_BATCH);
+    checkRestoreTmpDir(conf1, tmpPath1, 2);
+    checkRestoreTmpDir(conf2, tmpPath2, 2);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    htable3.close();
+    TestReplicationBase.tearDownAfterClass();
+  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
index dd60597..e993a78 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
@@ -164,10 +164,14 @@ public class TestReplicationBase {
   }
 
   protected static void loadData(String prefix, byte[] row) throws IOException {
+    loadData(prefix, row, famName);
+  }
+
+  protected static void loadData(String prefix, byte[] row, byte[] familyName) throws IOException {
     List<Put> puts = new ArrayList<>(NB_ROWS_IN_BATCH);
     for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
       Put put = new Put(Bytes.toBytes(prefix + Integer.toString(i)));
-      put.addColumn(famName, row, row);
+      put.addColumn(familyName, row, row);
       puts.add(put);
     }
     htable1.put(puts);


[hbase] 22/24: HBASE-22001 Polish the Admin interface

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 18772ff2735a06a80c0f3c8faa2f950cd0372412
Author: zhangduo <zh...@apache.org>
AuthorDate: Sat Mar 9 07:52:58 2019 +0800

    HBASE-22001 Polish the Admin interface
    
    Signed-off-by: stack <st...@apache.org>
---
 .../java/org/apache/hadoop/hbase/client/Admin.java | 480 +++++++++++++--------
 .../org/apache/hadoop/hbase/client/HBaseAdmin.java | 306 ++-----------
 .../hadoop/hbase/client/TestInterfaceAlign.java    |  11 +-
 .../org/apache/hadoop/hbase/util/FutureUtils.java  |  21 +
 .../master/cleaner/TestSnapshotFromMaster.java     |   8 +-
 .../hadoop/hbase/thrift2/client/ThriftAdmin.java   | 112 +----
 6 files changed, 380 insertions(+), 558 deletions(-)
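
The heart of this change is a sync-over-async pattern: blocking Admin methods become default methods that wait on their *Async counterparts for getSyncWaitTimeout() milliseconds, as the diff below shows. A minimal, self-contained sketch of that pattern follows; it is illustrative only, with SimplifiedAdmin and waitOn as made-up stand-ins for Admin and FutureUtils.get, while the real signatures appear in the patch itself.

import java.io.IOException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

interface SimplifiedAdmin {

  // Blocking wait time for async operations (hbase.client.sync.wait.timeout.msec).
  int getSyncWaitTimeout();

  // Asynchronous variant: returns as soon as the procedure is scheduled on the master.
  Future<Void> deleteTableAsync(String tableName) throws IOException;

  // Blocking variant expressed purely in terms of the async one.
  default void deleteTable(String tableName) throws IOException {
    waitOn(deleteTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
  }

  // Stand-in for the FutureUtils.get(future, timeout, unit) helper added by this patch.
  static <T> T waitOn(Future<T> future, long timeout, TimeUnit unit) throws IOException {
    try {
      return future.get(timeout, unit);
    } catch (Exception e) {
      throw new IOException(e);
    }
  }
}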

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 99db7d5..a0c5401 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import static org.apache.hadoop.hbase.util.FutureUtils.get;
+
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.Collection;
@@ -25,6 +27,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
 import java.util.regex.Pattern;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
@@ -41,6 +44,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
 import org.apache.hadoop.hbase.client.replication.TableCFs;
 import org.apache.hadoop.hbase.client.security.SecurityCapability;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
@@ -58,6 +62,7 @@ import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
 import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -74,8 +79,28 @@ import org.apache.yetus.audience.InterfaceAudience;
  */
 @InterfaceAudience.Public
 public interface Admin extends Abortable, Closeable {
+
+  /**
+   * Return the operation timeout for a rpc call.
+   * @see #getSyncWaitTimeout()
+   */
   int getOperationTimeout();
 
+  /**
+   * Return the blocking wait time for an asynchronous operation. Can be configured by
+   * {@code hbase.client.sync.wait.timeout.msec}.
+   * <p/>
+   * For several operations, such as createTable, deleteTable, etc., the rpc call will finish right
+   * after we schedule a procedure on the master side, so the timeout will not be controlled by the
+   * above {@link #getOperationTimeout()}. Instead, the timeout value here tells you how long we
+   * will wait until the procedure on the master side is finished.
+   * <p/>
+   * In general, you can consider the implementation of an XXXX method to be just
+   * XXXXAsync().get(getSyncWaitTimeout(), TimeUnit.MILLISECONDS).
+   * @see #getOperationTimeout()
+   */
+  int getSyncWaitTimeout();
+
   @Override
   void abort(String why, Throwable e);
 
@@ -136,7 +161,9 @@ public interface Admin extends Abortable, Closeable {
    * @throws IOException if a remote or network exception occurs
    * @see #listTables()
    */
-  List<TableDescriptor> listTableDescriptors(Pattern pattern) throws IOException;
+  default List<TableDescriptor> listTableDescriptors(Pattern pattern) throws IOException {
+    return listTableDescriptors(pattern, false);
+  }
 
   /**
    * List all the userspace tables matching the given regular expression.
@@ -208,7 +235,9 @@ public interface Admin extends Abortable, Closeable {
    * @return array of table names
    * @throws IOException if a remote or network exception occurs
    */
-  TableName[] listTableNames(Pattern pattern) throws IOException;
+  default TableName[] listTableNames(Pattern pattern) throws IOException {
+    return listTableNames(pattern, false);
+  }
 
   /**
    * List all of the names of userspace tables.
@@ -315,7 +344,9 @@ public interface Admin extends Abortable, Closeable {
    * threads, the table may have been created between test-for-existence and attempt-at-creation).
    * @throws IOException
    */
-  void createTable(TableDescriptor desc, byte[][] splitKeys) throws IOException;
+  default void createTable(TableDescriptor desc, byte[][] splitKeys) throws IOException {
+    get(createTableAsync(desc, splitKeys), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
+  }
 
   /**
    * Creates a new table but does not block and wait for it to come online.
@@ -337,11 +368,12 @@ public interface Admin extends Abortable, Closeable {
 
   /**
    * Deletes a table. Synchronous operation.
-   *
    * @param tableName name of table to delete
    * @throws IOException if a remote or network exception occurs
    */
-  void deleteTable(TableName tableName) throws IOException;
+  default void deleteTable(TableName tableName) throws IOException {
+    get(deleteTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
+  }
 
   /**
    * Deletes the table but does not block and wait for it to be completely removed.
@@ -403,8 +435,9 @@ public interface Admin extends Abortable, Closeable {
    * @param preserveSplits <code>true</code> if the splits should be preserved
    * @throws IOException if a remote or network exception occurs
    */
-  void truncateTable(TableName tableName, boolean preserveSplits)
-      throws IOException;
+  default void truncateTable(TableName tableName, boolean preserveSplits) throws IOException {
+    get(truncateTableAsync(tableName, preserveSplits), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
+  }
 
   /**
    * Truncate the table but does not block and wait for it to be completely enabled. You can use
@@ -421,19 +454,20 @@ public interface Admin extends Abortable, Closeable {
       throws IOException;
 
   /**
-   * Enable a table.  May timeout.  Use {@link #enableTableAsync(org.apache.hadoop.hbase.TableName)}
+   * Enable a table. May timeout. Use {@link #enableTableAsync(org.apache.hadoop.hbase.TableName)}
    * and {@link #isTableEnabled(org.apache.hadoop.hbase.TableName)} instead. The table has to be in
    * disabled state for it to be enabled.
-   *
    * @param tableName name of the table
    * @throws IOException if a remote or network exception occurs There could be couple types of
-   * IOException TableNotFoundException means the table doesn't exist. TableNotDisabledException
-   * means the table isn't in disabled state.
+   *           IOException TableNotFoundException means the table doesn't exist.
+   *           TableNotDisabledException means the table isn't in disabled state.
    * @see #isTableEnabled(org.apache.hadoop.hbase.TableName)
    * @see #disableTable(org.apache.hadoop.hbase.TableName)
    * @see #enableTableAsync(org.apache.hadoop.hbase.TableName)
    */
-  void enableTable(TableName tableName) throws IOException;
+  default void enableTable(TableName tableName) throws IOException {
+    get(enableTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
+  }
 
   /**
    * Enable the table but does not block and wait for it to be completely enabled.
@@ -501,16 +535,17 @@ public interface Admin extends Abortable, Closeable {
   Future<Void> disableTableAsync(TableName tableName) throws IOException;
 
   /**
-   * Disable table and wait on completion.  May timeout eventually.  Use {@link
-   * #disableTableAsync(org.apache.hadoop.hbase.TableName)} and
+   * Disable table and wait on completion. May timeout eventually. Use
+   * {@link #disableTableAsync(org.apache.hadoop.hbase.TableName)} and
    * {@link #isTableDisabled(org.apache.hadoop.hbase.TableName)} instead. The table has to be in
    * enabled state for it to be disabled.
-   *
    * @param tableName
    * @throws IOException There could be couple types of IOException TableNotFoundException means the
-   * table doesn't exist. TableNotEnabledException means the table isn't in enabled state.
+   *           table doesn't exist. TableNotEnabledException means the table isn't in enabled state.
    */
-  void disableTable(TableName tableName) throws IOException;
+  default void disableTable(TableName tableName) throws IOException {
+    get(disableTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
+  }
 
   /**
    * Disable tables matching the passed in pattern and wait on completion. Warning: Use this method
@@ -638,8 +673,10 @@ public interface Admin extends Abortable, Closeable {
    * @param columnFamily column family descriptor of column family to be added
    * @throws IOException if a remote or network exception occurs
    */
-  void addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily)
-    throws IOException;
+  default void addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily)
+      throws IOException {
+    get(addColumnFamilyAsync(tableName, columnFamily), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
+  }
 
   /**
    * Add a column family to an existing table. Asynchronous operation.
@@ -680,7 +717,10 @@ public interface Admin extends Abortable, Closeable {
    * @param columnFamily name of column family to be deleted
    * @throws IOException if a remote or network exception occurs
    */
-  void deleteColumnFamily(TableName tableName, byte[] columnFamily) throws IOException;
+  default void deleteColumnFamily(TableName tableName, byte[] columnFamily) throws IOException {
+    get(deleteColumnFamilyAsync(tableName, columnFamily), getSyncWaitTimeout(),
+      TimeUnit.MILLISECONDS);
+  }
 
   /**
    * Delete a column family from a table. Asynchronous operation.
@@ -699,9 +739,9 @@ public interface Admin extends Abortable, Closeable {
       throws IOException;
 
   /**
-   * Modify an existing column family on a table. Synchronous operation.
-   * Use {@link #modifyColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it
-   * returns a {@link Future} from which you can learn whether success or failure.
+   * Modify an existing column family on a table. Synchronous operation. Use
+   * {@link #modifyColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it returns
+   * a {@link Future} from which you can learn whether success or failure.
    * @param tableName name of table
    * @param columnFamily new column family descriptor to use
    * @throws IOException if a remote or network exception occurs
@@ -723,8 +763,11 @@ public interface Admin extends Abortable, Closeable {
    * @param columnFamily new column family descriptor to use
    * @throws IOException if a remote or network exception occurs
    */
-  void modifyColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily)
-      throws IOException;
+  default void modifyColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily)
+      throws IOException {
+    get(modifyColumnFamilyAsync(tableName, columnFamily), getSyncWaitTimeout(),
+      TimeUnit.MILLISECONDS);
+  }
 
   /**
    * Modify an existing column family on a table. Asynchronous operation.
@@ -1348,8 +1391,7 @@ public interface Admin extends Abortable, Closeable {
    * @param splitPoint the explicit position to split on
    * @throws IOException if a remote or network exception occurs
    */
-  void split(TableName tableName, byte[] splitPoint)
-    throws IOException;
+  void split(TableName tableName, byte[] splitPoint) throws IOException;
 
   /**
    * Split an individual region. Asynchronous operation.
@@ -1370,28 +1412,33 @@ public interface Admin extends Abortable, Closeable {
    * @param splitPoint the explicit position to split on
    * @throws IOException if a remote or network exception occurs
    */
-  Future<Void> splitRegionAsync(byte[] regionName, byte[] splitPoint)
-    throws IOException;
+  Future<Void> splitRegionAsync(byte[] regionName, byte[] splitPoint) throws IOException;
 
   /**
    * Modify an existing table, more IRB friendly version.
-   *
    * @param tableName name of table.
    * @param td modified description of the table
    * @throws IOException if a remote or network exception occurs
-   * @deprecated since 2.0 version and will be removed in 3.0 version.
-   *             use {@link #modifyTable(TableDescriptor)}
+   * @deprecated since 2.0 version and will be removed in 3.0 version. use
+   *             {@link #modifyTable(TableDescriptor)}
    */
   @Deprecated
-  void modifyTable(TableName tableName, TableDescriptor td)
-      throws IOException;
+  default void modifyTable(TableName tableName, TableDescriptor td) throws IOException {
+    if (!tableName.equals(td.getTableName())) {
+      throw new IllegalArgumentException("the specified table name '" + tableName +
+        "' doesn't match with the HTD one: " + td.getTableName());
+    }
+    modifyTable(td);
+  }
 
   /**
    * Modify an existing table, more IRB friendly version.
    * @param td modified description of the table
    * @throws IOException if a remote or network exception occurs
    */
-  void modifyTable(TableDescriptor td) throws IOException;
+  default void modifyTable(TableDescriptor td) throws IOException {
+    get(modifyTableAsync(td), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
+  }
 
   /**
    * Modify an existing table, more IRB friendly version. Asynchronous operation.  This means that
@@ -1410,8 +1457,14 @@ public interface Admin extends Abortable, Closeable {
    *             use {@link #modifyTableAsync(TableDescriptor)}
    */
   @Deprecated
-  Future<Void> modifyTableAsync(TableName tableName, TableDescriptor td)
-      throws IOException;
+  default Future<Void> modifyTableAsync(TableName tableName, TableDescriptor td)
+      throws IOException {
+    if (!tableName.equals(td.getTableName())) {
+      throw new IllegalArgumentException("the specified table name '" + tableName +
+        "' doesn't match with the HTD one: " + td.getTableName());
+    }
+    return modifyTableAsync(td);
+  }
 
   /**
    * Modify an existing table, more IRB (ruby) friendly version. Asynchronous operation. This means that
@@ -1424,31 +1477,24 @@ public interface Admin extends Abortable, Closeable {
    * @param td description of the table
    * @throws IOException if a remote or network exception occurs
    * @return the result of the async modify. You can use Future.get(long, TimeUnit) to wait on the
-   *     operation to complete
+   *         operation to complete
    */
-  Future<Void> modifyTableAsync(TableDescriptor td)
-      throws IOException;
+  Future<Void> modifyTableAsync(TableDescriptor td) throws IOException;
 
   /**
-   * <p>
    * Shuts down the HBase cluster.
-   * </p>
-   * <p>
+   * <p/>
    * Notice that, a success shutdown call may ends with an error since the remote server has already
    * been shutdown.
-   * </p>
    * @throws IOException if a remote or network exception occurs
    */
   void shutdown() throws IOException;
 
   /**
-   * <p>
    * Shuts down the current HBase master only. Does not shutdown the cluster.
-   * </p>
-   * <p>
+   * <p/>
    * Notice that, a success stopMaster call may ends with an error since the remote server has
    * already been shutdown.
-   * </p>
    * @throws IOException if a remote or network exception occurs
    * @see #shutdown()
    */
@@ -1568,71 +1614,65 @@ public interface Admin extends Abortable, Closeable {
   Configuration getConfiguration();
 
   /**
-   * Create a new namespace. Blocks until namespace has been successfully created or an exception
-   * is thrown.
-   *
+   * Create a new namespace. Blocks until namespace has been successfully created or an exception is
+   * thrown.
    * @param descriptor descriptor which describes the new namespace.
    */
-  void createNamespace(NamespaceDescriptor descriptor)
-  throws IOException;
+  default void createNamespace(NamespaceDescriptor descriptor) throws IOException {
+    get(createNamespaceAsync(descriptor), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
+  }
 
   /**
    * Create a new namespace.
-   *
    * @param descriptor descriptor which describes the new namespace
    * @return the result of the async create namespace operation. Use Future.get(long, TimeUnit) to
-   *  wait on the operation to complete.
+   *         wait on the operation to complete.
    */
-  Future<Void> createNamespaceAsync(NamespaceDescriptor descriptor)
-  throws IOException;
+  Future<Void> createNamespaceAsync(NamespaceDescriptor descriptor) throws IOException;
 
   /**
-   * Modify an existing namespace.  Blocks until namespace has been successfully modified or an
+   * Modify an existing namespace. Blocks until namespace has been successfully modified or an
    * exception is thrown.
-   *
    * @param descriptor descriptor which describes the new namespace
    */
-  void modifyNamespace(NamespaceDescriptor descriptor)
-  throws IOException;
+  default void modifyNamespace(NamespaceDescriptor descriptor) throws IOException {
+    get(modifyNamespaceAsync(descriptor), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
+  }
 
   /**
    * Modify an existing namespace.
-   *
    * @param descriptor descriptor which describes the new namespace
    * @return the result of the async modify namespace operation. Use Future.get(long, TimeUnit) to
-   *  wait on the operation to complete.
+   *         wait on the operation to complete.
    */
-  Future<Void> modifyNamespaceAsync(NamespaceDescriptor descriptor)
-  throws IOException;
+  Future<Void> modifyNamespaceAsync(NamespaceDescriptor descriptor) throws IOException;
 
   /**
-   * Delete an existing namespace. Only empty namespaces (no tables) can be removed.
-   * Blocks until namespace has been successfully deleted or an
-   * exception is thrown.
-   *
+   * Delete an existing namespace. Only empty namespaces (no tables) can be removed. Blocks until
+   * namespace has been successfully deleted or an exception is thrown.
    * @param name namespace name
    */
-  void deleteNamespace(String name) throws IOException;
+  default void deleteNamespace(String name) throws IOException {
+    get(deleteNamespaceAsync(name), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
+  }
 
   /**
    * Delete an existing namespace. Only empty namespaces (no tables) can be removed.
-   *
    * @param name namespace name
    * @return the result of the async delete namespace operation. Use Future.get(long, TimeUnit) to
-   *  wait on the operation to complete.
+   *         wait on the operation to complete.
    */
   Future<Void> deleteNamespaceAsync(String name) throws IOException;
 
   /**
    * Get a namespace descriptor by name.
-   *
    * @param name name of namespace descriptor
    * @return A descriptor
    * @throws org.apache.hadoop.hbase.NamespaceNotFoundException
    * @throws IOException if a remote or network exception occurs
    */
   NamespaceDescriptor getNamespaceDescriptor(String name)
-  throws NamespaceNotFoundException, IOException;
+      throws NamespaceNotFoundException, IOException;
 
   /**
    * List available namespace descriptors.
@@ -1657,23 +1697,17 @@ public interface Admin extends Abortable, Closeable {
 
   /**
    * Get list of table descriptors by namespace.
-   *
    * @param name namespace name
    * @return returns a list of TableDescriptors
-   * @throws IOException
    */
-  List<TableDescriptor> listTableDescriptorsByNamespace(byte[] name)
-      throws IOException;
+  List<TableDescriptor> listTableDescriptorsByNamespace(byte[] name) throws IOException;
 
   /**
    * Get list of table names by namespace.
-   *
    * @param name namespace name
    * @return The list of table names in the namespace
-   * @throws IOException
    */
-  TableName[] listTableNamesByNamespace(String name)
-      throws IOException;
+  TableName[] listTableNamesByNamespace(String name) throws IOException;
 
   /**
    * Get the regions of a given table.
@@ -1739,17 +1773,20 @@ public interface Admin extends Abortable, Closeable {
 
   /**
    * Abort a procedure.
+   * <p/>
    * Do not use. Usually it is ignored but if not, it can do more damage than good. See hbck2.
    * @param procId ID of the procedure to abort
    * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
-   * @return <code>true</code> if aborted, <code>false</code> if procedure already completed or does not exist
+   * @return <code>true</code> if aborted, <code>false</code> if procedure already completed or does
+   *         not exist
    * @throws IOException
    * @deprecated Since 2.1.1 -- to be removed.
    */
   @Deprecated
-  boolean abortProcedure(
-      long procId,
-      boolean mayInterruptIfRunning) throws IOException;
+  default boolean abortProcedure(long procId, boolean mayInterruptIfRunning) throws IOException {
+    return get(abortProcedureAsync(procId, mayInterruptIfRunning), getSyncWaitTimeout(),
+      TimeUnit.MILLISECONDS);
+  }
 
   /**
    * Abort a procedure but does not block and wait for completion.
@@ -1878,19 +1915,20 @@ public interface Admin extends Abortable, Closeable {
    * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
    * taken. If the table is disabled, an offline snapshot is taken. Snapshots are considered unique
    * based on <b>the name of the snapshot</b>. Attempts to take a snapshot with the same name (even
-   * a different type or with different parameters) will fail with a {@link
-   * org.apache.hadoop.hbase.snapshot.SnapshotCreationException} indicating the duplicate naming.
-   * Snapshot names follow the same naming constraints as tables in HBase. See {@link
-   * org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
-   *
+   * a different type or with different parameters) will fail with a
+   * {@link org.apache.hadoop.hbase.snapshot.SnapshotCreationException} indicating the duplicate
+   * naming. Snapshot names follow the same naming constraints as tables in HBase. See
+   * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
    * @param snapshotName name of the snapshot to be created
    * @param tableName name of the table for which snapshot is created
    * @throws IOException if a remote or network exception occurs
    * @throws org.apache.hadoop.hbase.snapshot.SnapshotCreationException if snapshot creation failed
    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
    */
-  void snapshot(String snapshotName, TableName tableName)
-      throws IOException, SnapshotCreationException, IllegalArgumentException;
+  default void snapshot(String snapshotName, TableName tableName)
+      throws IOException, SnapshotCreationException, IllegalArgumentException {
+    snapshot(snapshotName, tableName, SnapshotType.FLUSH);
+  }
 
   /**
    * Create a timestamp consistent snapshot for the given table. Snapshots are considered unique
@@ -1898,15 +1936,19 @@ public interface Admin extends Abortable, Closeable {
    * different type or with different parameters) will fail with a {@link SnapshotCreationException}
    * indicating the duplicate naming. Snapshot names follow the same naming constraints as tables in
    * HBase.
-   *
    * @param snapshotName name of the snapshot to be created
    * @param tableName name of the table for which snapshot is created
    * @throws IOException if a remote or network exception occurs
    * @throws SnapshotCreationException if snapshot creation failed
    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
+   * @deprecated since 2.3.0, will be removed in 3.0.0. Use {@link #snapshot(String, TableName)}
+   *             instead.
    */
-  void snapshot(byte[] snapshotName, TableName tableName)
-      throws IOException, SnapshotCreationException, IllegalArgumentException;
+  @Deprecated
+  default void snapshot(byte[] snapshotName, TableName tableName)
+      throws IOException, SnapshotCreationException, IllegalArgumentException {
+    snapshot(Bytes.toString(snapshotName), tableName);
+  }
 
   /**
    * Create typed snapshot of the table. Snapshots are considered unique based on <b>the name of the
@@ -1914,19 +1956,18 @@ public interface Admin extends Abortable, Closeable {
    * different parameters) will fail with a {@link SnapshotCreationException} indicating the
    * duplicate naming. Snapshot names follow the same naming constraints as tables in HBase. See
    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
-   *
    * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other
-   * snapshots stored on the cluster
+   *          snapshots stored on the cluster
    * @param tableName name of the table to snapshot
    * @param type type of snapshot to take
    * @throws IOException we fail to reach the master
    * @throws SnapshotCreationException if snapshot creation failed
    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
    */
-  void snapshot(String snapshotName,
-      TableName tableName,
-      SnapshotType type) throws IOException, SnapshotCreationException,
-      IllegalArgumentException;
+  default void snapshot(String snapshotName, TableName tableName, SnapshotType type)
+      throws IOException, SnapshotCreationException, IllegalArgumentException {
+    snapshot(new SnapshotDescription(snapshotName, tableName, type));
+  }
 
   /**
    * Take a snapshot and wait for the server to complete that snapshot (blocking). Only a single
@@ -1935,12 +1976,11 @@ public interface Admin extends Abortable, Closeable {
    * single cluster). Snapshots are considered unique based on <b>the name of the snapshot</b>.
    * Attempts to take a snapshot with the same name (even a different type or with different
    * parameters) will fail with a {@link SnapshotCreationException} indicating the duplicate naming.
-   * Snapshot names follow the same naming constraints as tables in HBase. See {@link
-   * org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. You should probably
-   * use {@link #snapshot(String, org.apache.hadoop.hbase.TableName)} or
+   * Snapshot names follow the same naming constraints as tables in HBase. See
+   * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. You should
+   * probably use {@link #snapshot(String, org.apache.hadoop.hbase.TableName)} or
    * {@link #snapshot(byte[], org.apache.hadoop.hbase.TableName)} unless you are sure about the type
    * of snapshot that you want to take.
-   *
    * @param snapshot snapshot to take
    * @throws IOException or we lose contact with the master.
    * @throws SnapshotCreationException if snapshot failed to be taken
@@ -1961,21 +2001,22 @@ public interface Admin extends Abortable, Closeable {
    * {@link #snapshotAsync(SnapshotDescription)} instead.
    */
   @Deprecated
+  @SuppressWarnings("FutureReturnValueIgnored")
   default void takeSnapshotAsync(SnapshotDescription snapshot)
-  throws IOException, SnapshotCreationException {
+      throws IOException, SnapshotCreationException {
     snapshotAsync(snapshot);
   }
 
   /**
    * Take a snapshot without waiting for the server to complete that snapshot (asynchronous) Only a
    * single snapshot should be taken at a time, or results may be undefined.
-   *
    * @param snapshot snapshot to take
    * @throws IOException if the snapshot did not succeed or we lose contact with the master.
    * @throws SnapshotCreationException if snapshot creation failed
    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
    */
-  void snapshotAsync(SnapshotDescription snapshot) throws IOException, SnapshotCreationException;
+  Future<Void> snapshotAsync(SnapshotDescription snapshot)
+      throws IOException, SnapshotCreationException;
 
   /**
    * Check the current state of the passed snapshot. There are three possible states: <ol>
@@ -1998,26 +2039,29 @@ public interface Admin extends Abortable, Closeable {
 
   /**
    * Restore the specified snapshot on the original table. (The table must be disabled) If the
-   * "hbase.snapshot.restore.take.failsafe.snapshot" configuration property is set to <code>true</code>, a
-   * snapshot of the current table is taken before executing the restore operation. In case of
-   * restore failure, the failsafe snapshot will be restored. If the restore completes without
-   * problem the failsafe snapshot is deleted.
-   *
+   * "hbase.snapshot.restore.take.failsafe.snapshot" configuration property is set to
+   * <code>true</code>, a snapshot of the current table is taken before executing the restore
+   * operation. In case of restore failure, the failsafe snapshot will be restored. If the restore
+   * completes without problem the failsafe snapshot is deleted.
    * @param snapshotName name of the snapshot to restore
    * @throws IOException if a remote or network exception occurs
    * @throws org.apache.hadoop.hbase.snapshot.RestoreSnapshotException if snapshot failed to be
-   * restored
+   *           restored
    * @throws IllegalArgumentException if the restore request is formatted incorrectly
+   * @deprecated since 2.3.0, will be removed in 3.0.0. Use {@link #restoreSnapshot(String)}
+   *             instead.
    */
-  void restoreSnapshot(byte[] snapshotName) throws IOException, RestoreSnapshotException;
+  @Deprecated
+  default void restoreSnapshot(byte[] snapshotName) throws IOException, RestoreSnapshotException {
+    restoreSnapshot(Bytes.toString(snapshotName));
+  }
 
   /**
    * Restore the specified snapshot on the original table. (The table must be disabled) If the
-   * "hbase.snapshot.restore.take.failsafe.snapshot" configuration property is set to <code>true</code>, a
-   * snapshot of the current table is taken before executing the restore operation. In case of
-   * restore failure, the failsafe snapshot will be restored. If the restore completes without
-   * problem the failsafe snapshot is deleted.
-   *
+   * "hbase.snapshot.restore.take.failsafe.snapshot" configuration property is set to
+   * <code>true</code>, a snapshot of the current table is taken before executing the restore
+   * operation. In case of restore failure, the failsafe snapshot will be restored. If the restore
+   * completes without problem the failsafe snapshot is deleted.
    * @param snapshotName name of the snapshot to restore
    * @throws IOException if a remote or network exception occurs
    * @throws RestoreSnapshotException if snapshot failed to be restored
@@ -2027,59 +2071,66 @@ public interface Admin extends Abortable, Closeable {
 
   /**
    * Restore the specified snapshot on the original table. (The table must be disabled) If the
-   * "hbase.snapshot.restore.take.failsafe.snapshot" configuration property is set to <code>true</code>, a
-   * snapshot of the current table is taken before executing the restore operation. In case of
-   * restore failure, the failsafe snapshot will be restored. If the restore completes without
-   * problem the failsafe snapshot is deleted.
-   *
+   * "hbase.snapshot.restore.take.failsafe.snapshot" configuration property is set to
+   * <code>true</code>, a snapshot of the current table is taken before executing the restore
+   * operation. In case of restore failure, the failsafe snapshot will be restored. If the restore
+   * completes without problem the failsafe snapshot is deleted.
    * @param snapshotName name of the snapshot to restore
    * @throws IOException if a remote or network exception occurs
    * @throws RestoreSnapshotException if snapshot failed to be restored
-   * @return the result of the async restore snapshot. You can use Future.get(long, TimeUnit)
-   *    to wait on the operation to complete.
+   * @return the result of the async restore snapshot. You can use Future.get(long, TimeUnit) to
+   *         wait on the operation to complete.
+   * @deprecated since 2.3.0, will be removed in 3.0.0. The implementation does not take care of the
+   *             failsafe property, so do not use it any more.
    */
+  @Deprecated
   Future<Void> restoreSnapshotAsync(String snapshotName)
       throws IOException, RestoreSnapshotException;
 
   /**
    * Restore the specified snapshot on the original table. (The table must be disabled) If
-   * 'takeFailSafeSnapshot' is set to <code>true</code>, a snapshot of the current table is taken before
-   * executing the restore operation. In case of restore failure, the failsafe snapshot will be
-   * restored. If the restore completes without problem the failsafe snapshot is deleted. The
+   * 'takeFailSafeSnapshot' is set to <code>true</code>, a snapshot of the current table is taken
+   * before executing the restore operation. In case of restore failure, the failsafe snapshot will
+   * be restored. If the restore completes without problem the failsafe snapshot is deleted. The
    * failsafe snapshot name is configurable by using the property
    * "hbase.snapshot.restore.failsafe.name".
-   *
    * @param snapshotName name of the snapshot to restore
    * @param takeFailSafeSnapshot <code>true</code> if the failsafe snapshot should be taken
    * @throws IOException if a remote or network exception occurs
    * @throws RestoreSnapshotException if snapshot failed to be restored
    * @throws IllegalArgumentException if the restore request is formatted incorrectly
+   * @deprecated since 2.3.0, will be removed in 3.0.0. Use
+   *             {@link #restoreSnapshot(String, boolean)} instead.
    */
-  void restoreSnapshot(byte[] snapshotName, boolean takeFailSafeSnapshot)
-      throws IOException, RestoreSnapshotException;
+  @Deprecated
+  default void restoreSnapshot(byte[] snapshotName, boolean takeFailSafeSnapshot)
+      throws IOException, RestoreSnapshotException {
+    restoreSnapshot(Bytes.toString(snapshotName), takeFailSafeSnapshot);
+  }
 
   /**
    * Restore the specified snapshot on the original table. (The table must be disabled) If
-   * 'takeFailSafeSnapshot' is set to <code>true</code>, a snapshot of the current table is taken before
-   * executing the restore operation. In case of restore failure, the failsafe snapshot will be
-   * restored. If the restore completes without problem the failsafe snapshot is deleted. The
+   * 'takeFailSafeSnapshot' is set to <code>true</code>, a snapshot of the current table is taken
+   * before executing the restore operation. In case of restore failure, the failsafe snapshot will
+   * be restored. If the restore completes without problem the failsafe snapshot is deleted. The
    * failsafe snapshot name is configurable by using the property
    * "hbase.snapshot.restore.failsafe.name".
-   *
    * @param snapshotName name of the snapshot to restore
    * @param takeFailSafeSnapshot <code>true</code> if the failsafe snapshot should be taken
    * @throws IOException if a remote or network exception occurs
    * @throws RestoreSnapshotException if snapshot failed to be restored
    * @throws IllegalArgumentException if the restore request is formatted incorrectly
    */
-  void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot)
-      throws IOException, RestoreSnapshotException;
+  default void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot)
+      throws IOException, RestoreSnapshotException {
+    restoreSnapshot(snapshotName, takeFailSafeSnapshot, false);
+  }
 
   /**
    * Restore the specified snapshot on the original table. (The table must be disabled) If
-   * 'takeFailSafeSnapshot' is set to <code>true</code>, a snapshot of the current table is taken before
-   * executing the restore operation. In case of restore failure, the failsafe snapshot will be
-   * restored. If the restore completes without problem the failsafe snapshot is deleted. The
+   * 'takeFailSafeSnapshot' is set to <code>true</code>, a snapshot of the current table is taken
+   * before executing the restore operation. In case of restore failure, the failsafe snapshot will
+   * be restored. If the restore completes without problem the failsafe snapshot is deleted. The
    * failsafe snapshot name is configurable by using the property
    * "hbase.snapshot.restore.failsafe.name".
    * @param snapshotName name of the snapshot to restore
@@ -2094,60 +2145,81 @@ public interface Admin extends Abortable, Closeable {
 
   /**
    * Create a new table by cloning the snapshot content.
-   *
    * @param snapshotName name of the snapshot to be cloned
    * @param tableName name of the table where the snapshot will be restored
    * @throws IOException if a remote or network exception occurs
    * @throws TableExistsException if table to be created already exists
    * @throws RestoreSnapshotException if snapshot failed to be cloned
    * @throws IllegalArgumentException if the specified table has not a valid name
+   * @deprecated since 2.3.0, will be removed in 3.0.0. Use
+   *             {@link #cloneSnapshot(String, TableName)} instead.
    */
-  void cloneSnapshot(byte[] snapshotName, TableName tableName)
-      throws IOException, TableExistsException, RestoreSnapshotException;
+  @Deprecated
+  default void cloneSnapshot(byte[] snapshotName, TableName tableName)
+      throws IOException, TableExistsException, RestoreSnapshotException {
+    cloneSnapshot(Bytes.toString(snapshotName), tableName);
+  }
 
   /**
    * Create a new table by cloning the snapshot content.
    * @param snapshotName name of the snapshot to be cloned
    * @param tableName name of the table where the snapshot will be restored
-   * @param restoreAcl <code>true</code> to clone acl into newly created table
    * @throws IOException if a remote or network exception occurs
    * @throws TableExistsException if table to be created already exists
    * @throws RestoreSnapshotException if snapshot failed to be cloned
    * @throws IllegalArgumentException if the specified table has not a valid name
    */
-  void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl)
-      throws IOException, TableExistsException, RestoreSnapshotException;
+  default void cloneSnapshot(String snapshotName, TableName tableName)
+      throws IOException, TableExistsException, RestoreSnapshotException {
+    cloneSnapshot(snapshotName, tableName, false);
+  }
 
   /**
    * Create a new table by cloning the snapshot content.
-   *
    * @param snapshotName name of the snapshot to be cloned
    * @param tableName name of the table where the snapshot will be restored
+   * @param restoreAcl <code>true</code> to clone acl into newly created table
    * @throws IOException if a remote or network exception occurs
    * @throws TableExistsException if table to be created already exists
    * @throws RestoreSnapshotException if snapshot failed to be cloned
    * @throws IllegalArgumentException if the specified table has not a valid name
    */
-  void cloneSnapshot(String snapshotName, TableName tableName)
-      throws IOException, TableExistsException, RestoreSnapshotException;
+  default void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl)
+      throws IOException, TableExistsException, RestoreSnapshotException {
+    get(cloneSnapshotAsync(snapshotName, tableName, restoreAcl), getSyncWaitTimeout(),
+      TimeUnit.MILLISECONDS);
+  }
 
   /**
-   * Create a new table by cloning the snapshot content, but does not block
-   * and wait for it to be completely cloned.
-   * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
-   * It may throw ExecutionException if there was an error while executing the operation
-   * or TimeoutException in case the wait timeout was not long enough to allow the
-   * operation to complete.
-   *
+   * Create a new table by cloning the snapshot content, but does not block and wait for it to be
+   * completely cloned. You can use Future.get(long, TimeUnit) to wait on the operation to complete.
+   * It may throw ExecutionException if there was an error while executing the operation or
+   * TimeoutException in case the wait timeout was not long enough to allow the operation to
+   * complete.
    * @param snapshotName name of the snapshot to be cloned
    * @param tableName name of the table where the snapshot will be restored
    * @throws IOException if a remote or network exception occurs
    * @throws TableExistsException if table to be cloned already exists
-   * @return the result of the async clone snapshot. You can use Future.get(long, TimeUnit)
-   *    to wait on the operation to complete.
+   * @return the result of the async clone snapshot. You can use Future.get(long, TimeUnit) to wait
+   *         on the operation to complete.
+   */
+  default Future<Void> cloneSnapshotAsync(String snapshotName, TableName tableName)
+      throws IOException, TableExistsException {
+    return cloneSnapshotAsync(snapshotName, tableName, false);
+  }
+
+  /**
+   * Create a new table by cloning the snapshot content.
+   * @param snapshotName name of the snapshot to be cloned
+   * @param tableName name of the table where the snapshot will be restored
+   * @param restoreAcl <code>true</code> to clone acl into newly created table
+   * @throws IOException if a remote or network exception occurs
+   * @throws TableExistsException if table to be created already exists
+   * @throws RestoreSnapshotException if snapshot failed to be cloned
+   * @throws IllegalArgumentException if the specified table has not a valid name
    */
-  Future<Void> cloneSnapshotAsync(String snapshotName, TableName tableName)
-      throws IOException, TableExistsException;
+  Future<Void> cloneSnapshotAsync(String snapshotName, TableName tableName, boolean restoreAcl)
+      throws IOException, TableExistsException, RestoreSnapshotException;
 
   /**
    * Execute a distributed procedure on a cluster.
@@ -2183,17 +2255,16 @@ public interface Admin extends Abortable, Closeable {
 
   /**
    * Execute a distributed procedure on a cluster.
-   *
    * @param signature A distributed procedure is uniquely identified by its signature (default the
-   * root ZK node name of the procedure).
+   *          root ZK node name of the procedure).
    * @param instance The instance name of the procedure. For some procedures, this parameter is
-   * optional.
+   *          optional.
    * @param props Property/Value pairs of properties passing to the procedure
    * @return data returned after procedure execution. null if no return data.
    * @throws IOException
    */
   byte[] execProcedureWithReturn(String signature, String instance, Map<String, String> props)
-  throws IOException;
+      throws IOException;
 
   /**
    * Check the current state of the specified procedure. There are three possible states: <ol>
@@ -2519,12 +2590,15 @@ public interface Admin extends Abortable, Closeable {
    * @param enabled peer state, true if ENABLED and false if DISABLED
    * @throws IOException if a remote or network exception occurs
    */
-  void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-      throws IOException;
+  default void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
+      throws IOException {
+    get(addReplicationPeerAsync(peerId, peerConfig, enabled), getSyncWaitTimeout(),
+      TimeUnit.MILLISECONDS);
+  }
 
   /**
    * Add a new replication peer but does not block and wait for it.
-   * <p>
+   * <p/>
    * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
    * ExecutionException if there was an error while executing the operation or TimeoutException in
    * case the wait timeout was not long enough to allow the operation to complete.
@@ -2558,7 +2632,10 @@ public interface Admin extends Abortable, Closeable {
    * @param peerId a short name that identifies the peer
    * @throws IOException if a remote or network exception occurs
    */
-  void removeReplicationPeer(String peerId) throws IOException;
+  default void removeReplicationPeer(String peerId) throws IOException {
+    get(removeReplicationPeerAsync(peerId), getSyncWaitTimeout(),
+      TimeUnit.MILLISECONDS);
+  }
 
   /**
    * Remove a replication peer but does not block and wait for it.
@@ -2577,7 +2654,9 @@ public interface Admin extends Abortable, Closeable {
    * @param peerId a short name that identifies the peer
    * @throws IOException if a remote or network exception occurs
    */
-  void enableReplicationPeer(String peerId) throws IOException;
+  default void enableReplicationPeer(String peerId) throws IOException {
+    get(enableReplicationPeerAsync(peerId), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
+  }
 
   /**
    * Enable a replication peer but does not block and wait for it.
@@ -2596,11 +2675,13 @@ public interface Admin extends Abortable, Closeable {
    * @param peerId a short name that identifies the peer
    * @throws IOException if a remote or network exception occurs
    */
-  void disableReplicationPeer(String peerId) throws IOException;
+  default void disableReplicationPeer(String peerId) throws IOException {
+    get(disableReplicationPeerAsync(peerId), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
+  }
 
   /**
    * Disable a replication peer but does not block and wait for it.
-   * <p>
+   * <p/>
    * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
    * ExecutionException if there was an error while executing the operation or TimeoutException in
    * case the wait timeout was not long enough to allow the operation to complete.
@@ -2624,12 +2705,15 @@ public interface Admin extends Abortable, Closeable {
    * @param peerConfig new config for the replication peer
    * @throws IOException if a remote or network exception occurs
    */
-  void updateReplicationPeerConfig(String peerId,
-      ReplicationPeerConfig peerConfig) throws IOException;
+  default void updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig)
+      throws IOException {
+    get(updateReplicationPeerConfigAsync(peerId, peerConfig), getSyncWaitTimeout(),
+      TimeUnit.MILLISECONDS);
+  }
 
   /**
    * Update the peerConfig for the specified peer but does not block and wait for it.
-   * <p>
+   * <p/>
    * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
    * ExecutionException if there was an error while executing the operation or TimeoutException in
    * case the wait timeout was not long enough to allow the operation to complete.
@@ -2648,9 +2732,16 @@ public interface Admin extends Abortable, Closeable {
    * @throws ReplicationException if tableCfs has conflict with existing config
    * @throws IOException if a remote or network exception occurs
    */
-  void appendReplicationPeerTableCFs(String id,
-      Map<TableName, List<String>> tableCfs)
-      throws ReplicationException, IOException;
+  default void appendReplicationPeerTableCFs(String id, Map<TableName, List<String>> tableCfs)
+      throws ReplicationException, IOException {
+    if (tableCfs == null) {
+      throw new ReplicationException("tableCfs is null");
+    }
+    ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id);
+    ReplicationPeerConfig newPeerConfig =
+      ReplicationPeerConfigUtil.appendTableCFsToReplicationPeerConfig(tableCfs, peerConfig);
+    updateReplicationPeerConfig(id, newPeerConfig);
+  }
 
   /**
    * Remove some table-cfs from config of the specified peer.
@@ -2659,9 +2750,16 @@ public interface Admin extends Abortable, Closeable {
    * @throws ReplicationException if tableCfs has conflict with existing config
    * @throws IOException if a remote or network exception occurs
    */
-  void removeReplicationPeerTableCFs(String id,
-      Map<TableName, List<String>> tableCfs)
-      throws ReplicationException, IOException;
+  default void removeReplicationPeerTableCFs(String id, Map<TableName, List<String>> tableCfs)
+      throws ReplicationException, IOException {
+    if (tableCfs == null) {
+      throw new ReplicationException("tableCfs is null");
+    }
+    ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id);
+    ReplicationPeerConfig newPeerConfig =
+      ReplicationPeerConfigUtil.removeTableCFsFromReplicationPeerConfig(tableCfs, peerConfig, id);
+    updateReplicationPeerConfig(id, newPeerConfig);
+  }
 
   /**
    * Return a list of replication peers.
@@ -2684,8 +2782,11 @@ public interface Admin extends Abortable, Closeable {
    * @param state a new state of current cluster
    * @throws IOException if a remote or network exception occurs
    */
-  void transitReplicationPeerSyncReplicationState(String peerId, SyncReplicationState state)
-      throws IOException;
+  default void transitReplicationPeerSyncReplicationState(String peerId, SyncReplicationState state)
+      throws IOException {
+    get(transitReplicationPeerSyncReplicationStateAsync(peerId, state), getSyncWaitTimeout(),
+      TimeUnit.MILLISECONDS);
+  }
 
   /**
    * Transit current cluster to a new state in a synchronous replication peer. But does not block
@@ -2786,25 +2887,24 @@ public interface Admin extends Abortable, Closeable {
    * @throws IOException if a remote or network exception occurs
    * @return List of servers that are not cleared
    */
-  List<ServerName> clearDeadServers(final List<ServerName> servers) throws IOException;
+  List<ServerName> clearDeadServers(List<ServerName> servers) throws IOException;
 
   /**
    * Create a new table by cloning the existent table schema.
-   *
    * @param tableName name of the table to be cloned
    * @param newTableName name of the new table where the table will be created
    * @param preserveSplits True if the splits should be preserved
    * @throws IOException if a remote or network exception occurs
    */
-  void cloneTableSchema(final TableName tableName, final TableName newTableName,
-      final boolean preserveSplits) throws IOException;
+  void cloneTableSchema(TableName tableName, TableName newTableName, boolean preserveSplits)
+      throws IOException;
 
   /**
    * Switch the rpc throttle enable state.
    * @param enable Set to <code>true</code> to enable, <code>false</code> to disable.
    * @return Previous rpc throttle enabled value
    */
-  boolean switchRpcThrottle(final boolean enable) throws IOException;
+  boolean switchRpcThrottle(boolean enable) throws IOException;
 
   /**
    * Get if the rpc throttle is enabled.
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 6a38ead..1bfb7b3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client;
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
 import com.google.protobuf.RpcController;
-
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -45,7 +44,6 @@ import java.util.function.Supplier;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.CacheEvictionStats;
@@ -87,7 +85,6 @@ import org.apache.hadoop.hbase.quotas.QuotaRetriever;
 import org.apache.hadoop.hbase.quotas.QuotaSettings;
 import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
-import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.replication.SyncReplicationState;
@@ -270,6 +267,11 @@ public class HBaseAdmin implements Admin {
     return operationTimeout;
   }
 
+  @Override
+  public int getSyncWaitTimeout() {
+    return syncWaitTimeout;
+  }
+
   HBaseAdmin(ClusterConnection connection) throws IOException {
     this.conf = connection.getConfiguration();
     this.connection = connection;
@@ -335,11 +337,6 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public List<TableDescriptor> listTableDescriptors(Pattern pattern) throws IOException {
-    return listTableDescriptors(pattern, false);
-  }
-
-  @Override
   public List<TableDescriptor> listTableDescriptors(Pattern pattern, boolean includeSysTables)
       throws IOException {
     return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
@@ -362,11 +359,6 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public void modifyTable(TableDescriptor td) throws IOException {
-    get(modifyTableAsync(td), syncWaitTimeout, TimeUnit.MILLISECONDS);
-  }
-
-  @Override
   public Future<Void> modifyTableAsync(TableDescriptor td) throws IOException {
     ModifyTableResponse response = executeCallable(
       new MasterCallable<ModifyTableResponse>(getConnection(), getRpcControllerFactory()) {
@@ -511,11 +503,6 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public TableName[] listTableNames(Pattern pattern) throws IOException {
-    return listTableNames(pattern, false);
-  }
-
-  @Override
   public TableName[] listTableNames(String regex) throws IOException {
     return listTableNames(Pattern.compile(regex), false);
   }
@@ -611,38 +598,30 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public void createTable(TableDescriptor desc)
-  throws IOException {
+  public void createTable(TableDescriptor desc) throws IOException {
     createTable(desc, null);
   }
 
   @Override
-  public void createTable(TableDescriptor desc, byte [] startKey,
-      byte [] endKey, int numRegions)
-  throws IOException {
-    if(numRegions < 3) {
+  public void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions)
+      throws IOException {
+    if (numRegions < 3) {
       throw new IllegalArgumentException("Must create at least three regions");
-    } else if(Bytes.compareTo(startKey, endKey) >= 0) {
+    } else if (Bytes.compareTo(startKey, endKey) >= 0) {
       throw new IllegalArgumentException("Start key must be smaller than end key");
     }
     if (numRegions == 3) {
-      createTable(desc, new byte[][]{startKey, endKey});
+      createTable(desc, new byte[][] { startKey, endKey });
       return;
     }
-    byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
-    if(splitKeys == null || splitKeys.length != numRegions - 1) {
+    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
+    if (splitKeys == null || splitKeys.length != numRegions - 1) {
       throw new IllegalArgumentException("Unable to split key range into enough regions");
     }
     createTable(desc, splitKeys);
   }
 
   @Override
-  public void createTable(final TableDescriptor desc, byte [][] splitKeys)
-      throws IOException {
-    get(createTableAsync(desc, splitKeys), syncWaitTimeout, TimeUnit.MILLISECONDS);
-  }
-
-  @Override
   public Future<Void> createTableAsync(final TableDescriptor desc, final byte[][] splitKeys)
       throws IOException {
     if (desc.getTableName() == null) {
@@ -712,11 +691,6 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public void deleteTable(final TableName tableName) throws IOException {
-    get(deleteTableAsync(tableName), syncWaitTimeout, TimeUnit.MILLISECONDS);
-  }
-
-  @Override
   public Future<Void> deleteTableAsync(final TableName tableName) throws IOException {
     DeleteTableResponse response = executeCallable(
       new MasterCallable<DeleteTableResponse>(getConnection(), getRpcControllerFactory()) {
@@ -792,12 +766,6 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public void truncateTable(final TableName tableName, final boolean preserveSplits)
-      throws IOException {
-    get(truncateTableAsync(tableName, preserveSplits), syncWaitTimeout, TimeUnit.MILLISECONDS);
-  }
-
-  @Override
   public Future<Void> truncateTableAsync(final TableName tableName, final boolean preserveSplits)
       throws IOException {
     TruncateTableResponse response =
@@ -858,12 +826,6 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public void enableTable(final TableName tableName)
-  throws IOException {
-    get(enableTableAsync(tableName), syncWaitTimeout, TimeUnit.MILLISECONDS);
-  }
-
-  @Override
   public Future<Void> enableTableAsync(final TableName tableName) throws IOException {
     TableName.isLegalFullyQualifiedTableName(tableName.getName());
     EnableTableResponse response = executeCallable(
@@ -923,12 +885,6 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public void disableTable(final TableName tableName)
-  throws IOException {
-    get(disableTableAsync(tableName), syncWaitTimeout, TimeUnit.MILLISECONDS);
-  }
-
-  @Override
   public Future<Void> disableTableAsync(final TableName tableName) throws IOException {
     TableName.isLegalFullyQualifiedTableName(tableName.getName());
     DisableTableResponse response = executeCallable(
@@ -1042,12 +998,6 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public void addColumnFamily(final TableName tableName, final ColumnFamilyDescriptor columnFamily)
-      throws IOException {
-    get(addColumnFamilyAsync(tableName, columnFamily), syncWaitTimeout, TimeUnit.MILLISECONDS);
-  }
-
-  @Override
   public Future<Void> addColumnFamilyAsync(final TableName tableName,
       final ColumnFamilyDescriptor columnFamily) throws IOException {
     AddColumnResponse response =
@@ -1087,14 +1037,8 @@ public class HBaseAdmin implements Admin {
   @Override
   @Deprecated
   public void deleteColumn(final TableName tableName, final byte[] columnFamily)
-  throws IOException {
-    deleteColumnFamily(tableName, columnFamily);
-  }
-
-  @Override
-  public void deleteColumnFamily(final TableName tableName, final byte[] columnFamily)
       throws IOException {
-    get(deleteColumnFamilyAsync(tableName, columnFamily), syncWaitTimeout, TimeUnit.MILLISECONDS);
+    deleteColumnFamily(tableName, columnFamily);
   }
 
   @Override
@@ -1131,12 +1075,6 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public void modifyColumnFamily(final TableName tableName,
-      final ColumnFamilyDescriptor columnFamily) throws IOException {
-    get(modifyColumnFamilyAsync(tableName, columnFamily), syncWaitTimeout, TimeUnit.MILLISECONDS);
-  }
-
-  @Override
   public Future<Void> modifyColumnFamilyAsync(final TableName tableName,
       final ColumnFamilyDescriptor columnFamily) throws IOException {
     ModifyColumnResponse response =
@@ -1983,22 +1921,6 @@ public class HBaseAdmin implements Admin {
     splitRegionAsync(regionServerPair.getFirst(), splitPoint);
   }
 
-  @Override
-  public void modifyTable(final TableName tableName, final TableDescriptor td)
-      throws IOException {
-    get(modifyTableAsync(tableName, td), syncWaitTimeout, TimeUnit.MILLISECONDS);
-  }
-
-  @Override
-  public Future<Void> modifyTableAsync(final TableName tableName, final TableDescriptor td)
-      throws IOException {
-    if (!tableName.equals(td.getTableName())) {
-      throw new IllegalArgumentException("the specified table name '" + tableName +
-        "' doesn't match with the HTD one: " + td.getTableName());
-    }
-    return modifyTableAsync(td);
-  }
-
   private static class ModifyTableFuture extends TableFuture<Void> {
     public ModifyTableFuture(final HBaseAdmin admin, final TableName tableName,
         final ModifyTableResponse response) {
@@ -2229,12 +2151,6 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public void createNamespace(final NamespaceDescriptor descriptor)
-  throws IOException {
-    get(createNamespaceAsync(descriptor), this.syncWaitTimeout, TimeUnit.MILLISECONDS);
-  }
-
-  @Override
   public Future<Void> createNamespaceAsync(final NamespaceDescriptor descriptor)
       throws IOException {
     CreateNamespaceResponse response =
@@ -2256,12 +2172,6 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public void modifyNamespace(final NamespaceDescriptor descriptor)
-  throws IOException {
-    get(modifyNamespaceAsync(descriptor), this.syncWaitTimeout, TimeUnit.MILLISECONDS);
-  }
-
-  @Override
   public Future<Void> modifyNamespaceAsync(final NamespaceDescriptor descriptor)
       throws IOException {
     ModifyNamespaceResponse response =
@@ -2283,12 +2193,6 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public void deleteNamespace(final String name)
-  throws IOException {
-    get(deleteNamespaceAsync(name), this.syncWaitTimeout, TimeUnit.MILLISECONDS);
-  }
-
-  @Override
   public Future<Void> deleteNamespaceAsync(final String name)
       throws IOException {
     DeleteNamespaceResponse response =
@@ -2489,44 +2393,6 @@ public class HBaseAdmin implements Admin {
     }
   }
 
-  /**
-   * Roll the log writer. I.e. when using a file system based write ahead log,
-   * start writing log messages to a new file.
-   *
-   * Note that when talking to a version 1.0+ HBase deployment, the rolling is asynchronous.
-   * This method will return as soon as the roll is requested and the return value will
-   * always be null. Additionally, the named region server may schedule store flushes at the
-   * request of the wal handling the roll request.
-   *
-   * When talking to a 0.98 or older HBase deployment, the rolling is synchronous and the
-   * return value may be either null or a list of encoded region names.
-   *
-   * @param serverName
-   *          The servername of the regionserver. A server name is made of host,
-   *          port and startcode. This is mandatory. Here is an example:
-   *          <code> host187.example.com,60020,1289493121758</code>
-   * @return a set of {@link HRegionInfo#getEncodedName()} that would allow the wal to
-   *         clean up some underlying files. null if there's nothing to flush.
-   * @throws IOException if a remote or network exception occurs
-   * @throws FailedLogCloseException
-   * @deprecated use {@link #rollWALWriter(ServerName)}
-   */
-  @Deprecated
-  public synchronized byte[][] rollHLogWriter(String serverName)
-      throws IOException, FailedLogCloseException {
-    ServerName sn = ServerName.valueOf(serverName);
-    final RollWALWriterResponse response = rollWALWriterImpl(sn);
-    int regionCount = response.getRegionToFlushCount();
-    if (0 == regionCount) {
-      return null;
-    }
-    byte[][] regionsToFlush = new byte[regionCount][];
-    for (int i = 0; i < regionCount; i++) {
-      regionsToFlush[i] = ProtobufUtil.toBytes(response.getRegionToFlush(i));
-    }
-    return regionsToFlush;
-  }
-
   @Override
   public synchronized void rollWALWriter(ServerName serverName)
       throws IOException, FailedLogCloseException {
@@ -2568,26 +2434,6 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public void snapshot(final String snapshotName,
-                       final TableName tableName) throws IOException,
-      SnapshotCreationException, IllegalArgumentException {
-    snapshot(snapshotName, tableName, SnapshotType.FLUSH);
-  }
-
-  @Override
-  public void snapshot(final byte[] snapshotName, final TableName tableName)
-      throws IOException, SnapshotCreationException, IllegalArgumentException {
-    snapshot(Bytes.toString(snapshotName), tableName, SnapshotType.FLUSH);
-  }
-
-  @Override
-  public void snapshot(final String snapshotName, final TableName tableName,
-      SnapshotType type)
-      throws IOException, SnapshotCreationException, IllegalArgumentException {
-    snapshot(new SnapshotDescription(snapshotName, tableName, type));
-  }
-
-  @Override
   public void snapshot(SnapshotDescription snapshotDesc)
       throws IOException, SnapshotCreationException, IllegalArgumentException {
     // actually take the snapshot
@@ -2632,9 +2478,35 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public void snapshotAsync(SnapshotDescription snapshotDesc) throws IOException,
-      SnapshotCreationException {
+  public Future<Void> snapshotAsync(SnapshotDescription snapshotDesc)
+      throws IOException, SnapshotCreationException {
     asyncSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc));
+    return new ProcedureFuture<Void>(this, null) {
+
+      @Override
+      protected Void waitOperationResult(long deadlineTs) throws IOException, TimeoutException {
+        waitForState(deadlineTs, new WaitForStateCallable() {
+
+          @Override
+          public void throwInterruptedException() throws InterruptedIOException {
+            throw new InterruptedIOException(
+              "Interrupted while waiting for taking snapshot" + snapshotDesc);
+          }
+
+          @Override
+          public void throwTimeoutException(long elapsedTime) throws TimeoutException {
+            throw new TimeoutException("Snapshot '" + snapshotDesc.getName() +
+              "' wasn't completed in expectedTime:" + elapsedTime + " ms");
+          }
+
+          @Override
+          public boolean checkState(int tries) throws IOException {
+            return isSnapshotFinished(snapshotDesc);
+          }
+        });
+        return null;
+      }
+    };
   }
 
   private SnapshotResponse asyncSnapshot(SnapshotProtos.SnapshotDescription snapshot)
@@ -2688,7 +2560,7 @@ public class HBaseAdmin implements Admin {
     restoreSnapshot(Bytes.toString(snapshotName), takeFailSafeSnapshot);
   }
 
-  /*
+  /**
    * Check whether the snapshot exists and contains disabled table
    *
    * @param snapshotName name of the snapshot to restore
@@ -2806,36 +2678,12 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public void cloneSnapshot(final byte[] snapshotName, final TableName tableName)
-      throws IOException, TableExistsException, RestoreSnapshotException {
-    cloneSnapshot(Bytes.toString(snapshotName), tableName);
-  }
-
-  @Override
-  public void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl)
-      throws IOException, TableExistsException, RestoreSnapshotException {
+  public Future<Void> cloneSnapshotAsync(String snapshotName, TableName tableName,
+      boolean restoreAcl) throws IOException, TableExistsException, RestoreSnapshotException {
     if (tableExists(tableName)) {
       throw new TableExistsException(tableName);
     }
-    get(
-      internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl),
-      Integer.MAX_VALUE,
-      TimeUnit.MILLISECONDS);
-  }
-
-  @Override
-  public void cloneSnapshot(final String snapshotName, final TableName tableName)
-      throws IOException, TableExistsException, RestoreSnapshotException {
-    cloneSnapshot(snapshotName, tableName, false);
-  }
-
-  @Override
-  public Future<Void> cloneSnapshotAsync(final String snapshotName, final TableName tableName)
-      throws IOException, TableExistsException {
-    if (tableExists(tableName)) {
-      throw new TableExistsException(tableName);
-    }
-    return internalRestoreSnapshotAsync(snapshotName, tableName, false);
+    return internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl);
   }
 
   @Override
@@ -3977,13 +3825,6 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-      throws IOException {
-    get(addReplicationPeerAsync(peerId, peerConfig, enabled), this.syncWaitTimeout,
-      TimeUnit.MILLISECONDS);
-  }
-
-  @Override
   public Future<Void> addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig,
       boolean enabled) throws IOException {
     AddReplicationPeerResponse response = executeCallable(
@@ -3998,11 +3839,6 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public void removeReplicationPeer(String peerId) throws IOException {
-    get(removeReplicationPeerAsync(peerId), this.syncWaitTimeout, TimeUnit.MILLISECONDS);
-  }
-
-  @Override
   public Future<Void> removeReplicationPeerAsync(String peerId) throws IOException {
     RemoveReplicationPeerResponse response =
       executeCallable(new MasterCallable<RemoveReplicationPeerResponse>(getConnection(),
@@ -4018,11 +3854,6 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public void enableReplicationPeer(final String peerId) throws IOException {
-    get(enableReplicationPeerAsync(peerId), this.syncWaitTimeout, TimeUnit.MILLISECONDS);
-  }
-
-  @Override
   public Future<Void> enableReplicationPeerAsync(final String peerId) throws IOException {
     EnableReplicationPeerResponse response =
       executeCallable(new MasterCallable<EnableReplicationPeerResponse>(getConnection(),
@@ -4038,11 +3869,6 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public void disableReplicationPeer(final String peerId) throws IOException {
-    get(disableReplicationPeerAsync(peerId), this.syncWaitTimeout, TimeUnit.MILLISECONDS);
-  }
-
-  @Override
   public Future<Void> disableReplicationPeerAsync(final String peerId) throws IOException {
     DisableReplicationPeerResponse response =
       executeCallable(new MasterCallable<DisableReplicationPeerResponse>(getConnection(),
@@ -4071,13 +3897,6 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public void updateReplicationPeerConfig(final String peerId,
-      final ReplicationPeerConfig peerConfig) throws IOException {
-    get(updateReplicationPeerConfigAsync(peerId, peerConfig), this.syncWaitTimeout,
-      TimeUnit.MILLISECONDS);
-  }
-
-  @Override
   public Future<Void> updateReplicationPeerConfigAsync(final String peerId,
       final ReplicationPeerConfig peerConfig) throws IOException {
     UpdateReplicationPeerConfigResponse response =
@@ -4094,13 +3913,6 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public void transitReplicationPeerSyncReplicationState(String peerId, SyncReplicationState state)
-      throws IOException {
-    get(transitReplicationPeerSyncReplicationStateAsync(peerId, state), this.syncWaitTimeout,
-      TimeUnit.MILLISECONDS);
-  }
-
-  @Override
   public Future<Void> transitReplicationPeerSyncReplicationStateAsync(String peerId,
       SyncReplicationState state) throws IOException {
     TransitReplicationPeerSyncReplicationStateResponse response =
@@ -4118,32 +3930,6 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public void appendReplicationPeerTableCFs(String id,
-      Map<TableName, List<String>> tableCfs)
-      throws ReplicationException, IOException {
-    if (tableCfs == null) {
-      throw new ReplicationException("tableCfs is null");
-    }
-    ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id);
-    ReplicationPeerConfig newPeerConfig =
-        ReplicationPeerConfigUtil.appendTableCFsToReplicationPeerConfig(tableCfs, peerConfig);
-    updateReplicationPeerConfig(id, newPeerConfig);
-  }
-
-  @Override
-  public void removeReplicationPeerTableCFs(String id,
-      Map<TableName, List<String>> tableCfs)
-      throws ReplicationException, IOException {
-    if (tableCfs == null) {
-      throw new ReplicationException("tableCfs is null");
-    }
-    ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id);
-    ReplicationPeerConfig newPeerConfig =
-        ReplicationPeerConfigUtil.removeTableCFsFromReplicationPeerConfig(tableCfs, peerConfig, id);
-    updateReplicationPeerConfig(id, newPeerConfig);
-  }
-
-  @Override
   public List<ReplicationPeerDescription> listReplicationPeers() throws IOException {
     return listReplicationPeers((Pattern)null);
   }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java
index ed72ac1..953fba7 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertTrue;
 import java.io.Closeable;
 import java.lang.annotation.Annotation;
 import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
 import java.util.Arrays;
 import java.util.List;
 import java.util.stream.Collectors;
@@ -32,17 +33,13 @@ import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 @Category({ ClientTests.class, SmallTests.class })
 public class TestInterfaceAlign {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestInterfaceAlign.class);
-
-  private static final Logger LOG = LoggerFactory.getLogger(TestInterfaceAlign.class);
+    HBaseClassTestRule.forClass(TestInterfaceAlign.class);
 
   /**
    * Test methods name match up
@@ -54,6 +51,7 @@ public class TestInterfaceAlign {
 
     // Remove some special methods
     adminMethodNames.remove("getOperationTimeout");
+    adminMethodNames.remove("getSyncWaitTimeout");
     adminMethodNames.remove("getConnection");
     adminMethodNames.remove("getConfiguration");
     adminMethodNames.removeAll(getMethodNames(Abortable.class));
@@ -78,7 +76,8 @@ public class TestInterfaceAlign {
   private <T> List<String> getMethodNames(Class<T> c) {
     // DON'T use the getDeclaredMethods as we want to check the Public APIs only.
     return Arrays.asList(c.getMethods()).stream().filter(m -> !isDeprecated(m))
-        .map(Method::getName).distinct().collect(Collectors.toList());
+      .filter(m -> !Modifier.isStatic(m.getModifiers())).map(Method::getName).distinct()
+      .collect(Collectors.toList());
   }
 
   private boolean isDeprecated(Method method) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java
index 6f0077c..c43bda6 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/FutureUtils.java
@@ -24,7 +24,10 @@ import java.util.concurrent.CompletionException;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Executor;
 import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.function.BiConsumer;
+import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -136,6 +139,24 @@ public final class FutureUtils {
   }
 
   /**
+   * A helper class for getting the result of a Future, and convert the error to an
+   * {@link IOException}.
+   */
+  public static <T> T get(Future<T> future, long timeout, TimeUnit unit) throws IOException {
+    try {
+      return future.get(timeout, unit);
+    } catch (InterruptedException e) {
+      throw (IOException) new InterruptedIOException().initCause(e);
+    } catch (ExecutionException e) {
+      Throwable cause = e.getCause();
+      Throwables.propagateIfPossible(cause, IOException.class);
+      throw new IOException(cause);
+    } catch (TimeoutException e) {
+      throw new TimeoutIOException(e);
+    }
+  }
+
+  /**
    * Returns a CompletableFuture that is already completed exceptionally with the given exception.
    */
   public static <T> CompletableFuture<T> failedFuture(Throwable e) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
index cc2ee06..fd183fc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
@@ -26,6 +26,7 @@ import java.io.IOException;
 import java.util.Collection;
 import java.util.List;
 import java.util.Set;
+import java.util.concurrent.Future;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
@@ -313,7 +314,7 @@ public class TestSnapshotFromMaster {
     // take a snapshot of the table
     String snapshotName = "snapshot";
     byte[] snapshotNameBytes = Bytes.toBytes(snapshotName);
-    admin.snapshot(snapshotNameBytes, TABLE_NAME);
+    admin.snapshot(snapshotName, TABLE_NAME);
 
     LOG.info("After snapshot File-System state");
     FSUtils.logFileSystemState(fs, rootDir, LOG);
@@ -436,12 +437,13 @@ public class TestSnapshotFromMaster {
       table.put(put);
     }
     String snapshotName = "testAsyncSnapshotWillNotBlockSnapshotHFileCleaner01";
-    UTIL.getAdmin().snapshotAsync(new org.apache.hadoop.hbase.client.SnapshotDescription(
+    Future<Void> future =
+      UTIL.getAdmin().snapshotAsync(new org.apache.hadoop.hbase.client.SnapshotDescription(
         snapshotName, TABLE_NAME, SnapshotType.FLUSH));
     Waiter.waitFor(UTIL.getConfiguration(), 10 * 1000L, 200L,
       () -> UTIL.getAdmin().listSnapshots(Pattern.compile(snapshotName)).size() == 1);
     assertTrue(master.getSnapshotManager().isTakingAnySnapshot());
-    Thread.sleep(11 * 1000L);
+    future.get();
     assertFalse(master.getSnapshotManager().isTakingAnySnapshot());
   }
 }
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
index ccc798f..888256e 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
@@ -26,7 +26,6 @@ import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.Future;
 import java.util.regex.Pattern;
-
 import org.apache.commons.lang3.NotImplementedException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CacheEvictionStats;
@@ -38,6 +37,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NamespaceNotFoundException;
 import org.apache.hadoop.hbase.RegionMetrics;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.Admin;
@@ -56,11 +56,11 @@ import org.apache.hadoop.hbase.quotas.QuotaFilter;
 import org.apache.hadoop.hbase.quotas.QuotaRetriever;
 import org.apache.hadoop.hbase.quotas.QuotaSettings;
 import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
-import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.replication.SyncReplicationState;
 import org.apache.hadoop.hbase.security.access.Permission;
+import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.thrift2.ThriftUtilities;
 import org.apache.hadoop.hbase.thrift2.generated.TColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
@@ -79,6 +79,7 @@ public class ThriftAdmin implements Admin {
   private THBaseService.Client client;
   private TTransport transport;
   private int operationTimeout;
+  private int syncWaitTimeout;
   private Configuration conf;
 
 
@@ -86,7 +87,8 @@ public class ThriftAdmin implements Admin {
     this.client = client;
     this.transport = tTransport;
     this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
-        HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
+      HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
+    this.syncWaitTimeout = conf.getInt("hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
     this.conf = conf;
   }
 
@@ -96,8 +98,12 @@ public class ThriftAdmin implements Admin {
   }
 
   @Override
-  public void abort(String why, Throwable e) {
+  public int getSyncWaitTimeout() {
+    return syncWaitTimeout;
+  }
 
+  @Override
+  public void abort(String why, Throwable e) {
   }
 
   @Override
@@ -987,7 +993,7 @@ public class ThriftAdmin implements Admin {
   }
 
   @Override
-  public void snapshotAsync(SnapshotDescription snapshot) {
+  public Future<Void> snapshotAsync(SnapshotDescription snapshot) {
     throw new NotImplementedException("snapshotAsync not supported in ThriftAdmin");
 
   }
@@ -1015,44 +1021,14 @@ public class ThriftAdmin implements Admin {
   }
 
   @Override
-  public void restoreSnapshot(byte[] snapshotName, boolean takeFailSafeSnapshot) {
-    throw new NotImplementedException("restoreSnapshot not supported in ThriftAdmin");
-
-  }
-
-  @Override
-  public void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot) {
-    throw new NotImplementedException("restoreSnapshot not supported in ThriftAdmin");
-
-  }
-
-  @Override
   public void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot,
       boolean restoreAcl) {
     throw new NotImplementedException("restoreSnapshot not supported in ThriftAdmin");
-
-  }
-
-  @Override
-  public void cloneSnapshot(byte[] snapshotName, TableName tableName) {
-    throw new NotImplementedException("cloneSnapshot not supported in ThriftAdmin");
-
-  }
-
-  @Override
-  public void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl) {
-    throw new NotImplementedException("cloneSnapshot not supported in ThriftAdmin");
-
-  }
-
-  @Override
-  public void cloneSnapshot(String snapshotName, TableName tableName) {
-    throw new NotImplementedException("cloneSnapshot not supported in ThriftAdmin");
-
   }
 
   @Override
-  public Future<Void> cloneSnapshotAsync(String snapshotName, TableName tableName) {
+  public Future<Void> cloneSnapshotAsync(String snapshotName, TableName tableName, boolean cloneAcl)
+      throws IOException, TableExistsException, RestoreSnapshotException {
     throw new NotImplementedException("cloneSnapshotAsync not supported in ThriftAdmin");
   }
 
@@ -1103,43 +1079,36 @@ public class ThriftAdmin implements Admin {
   @Override
   public void deleteSnapshot(byte[] snapshotName) {
     throw new NotImplementedException("deleteSnapshot not supported in ThriftAdmin");
-
   }
 
   @Override
   public void deleteSnapshot(String snapshotName) {
     throw new NotImplementedException("deleteSnapshot not supported in ThriftAdmin");
-
   }
 
   @Override
   public void deleteSnapshots(String regex) {
     throw new NotImplementedException("deleteSnapshots not supported in ThriftAdmin");
-
   }
 
   @Override
   public void deleteSnapshots(Pattern pattern) {
     throw new NotImplementedException("deleteSnapshots not supported in ThriftAdmin");
-
   }
 
   @Override
   public void deleteTableSnapshots(String tableNameRegex, String snapshotNameRegex) {
     throw new NotImplementedException("deleteTableSnapshots not supported in ThriftAdmin");
-
   }
 
   @Override
   public void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) {
     throw new NotImplementedException("deleteTableSnapshots not supported in ThriftAdmin");
-
   }
 
   @Override
   public void setQuota(QuotaSettings quota) {
     throw new NotImplementedException("setQuota not supported in ThriftAdmin");
-
   }
 
   @Override
@@ -1165,13 +1134,11 @@ public class ThriftAdmin implements Admin {
   @Override
   public void updateConfiguration(ServerName server) {
     throw new NotImplementedException("updateConfiguration not supported in ThriftAdmin");
-
   }
 
   @Override
   public void updateConfiguration() {
     throw new NotImplementedException("updateConfiguration not supported in ThriftAdmin");
-
   }
 
   @Override
@@ -1200,46 +1167,22 @@ public class ThriftAdmin implements Admin {
   }
 
   @Override
-  public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled) {
-    throw new NotImplementedException("addReplicationPeer not supported in ThriftAdmin");
-
-  }
-
-  @Override
   public Future<Void> addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig,
       boolean enabled) {
     throw new NotImplementedException("addReplicationPeerAsync not supported in ThriftAdmin");
   }
 
   @Override
-  public void removeReplicationPeer(String peerId) {
-    throw new NotImplementedException("removeReplicationPeer not supported in ThriftAdmin");
-
-  }
-
-  @Override
   public Future<Void> removeReplicationPeerAsync(String peerId) {
     throw new NotImplementedException("removeReplicationPeerAsync not supported in ThriftAdmin");
   }
 
   @Override
-  public void enableReplicationPeer(String peerId) {
-    throw new NotImplementedException("enableReplicationPeer not supported in ThriftAdmin");
-
-  }
-
-  @Override
   public Future<Void> enableReplicationPeerAsync(String peerId) {
     throw new NotImplementedException("enableReplicationPeerAsync not supported in ThriftAdmin");
   }
 
   @Override
-  public void disableReplicationPeer(String peerId) {
-    throw new NotImplementedException("disableReplicationPeer not supported in ThriftAdmin");
-
-  }
-
-  @Override
   public Future<Void> disableReplicationPeerAsync(String peerId) {
     throw new NotImplementedException("disableReplicationPeerAsync not supported in ThriftAdmin");
   }
@@ -1250,12 +1193,6 @@ public class ThriftAdmin implements Admin {
   }
 
   @Override
-  public void updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig) {
-    throw new NotImplementedException("updateReplicationPeerConfig not supported in ThriftAdmin");
-
-  }
-
-  @Override
   public Future<Void> updateReplicationPeerConfigAsync(String peerId,
       ReplicationPeerConfig peerConfig) {
     throw new NotImplementedException(
@@ -1263,20 +1200,6 @@ public class ThriftAdmin implements Admin {
   }
 
   @Override
-  public void appendReplicationPeerTableCFs(String id, Map<TableName, List<String>> tableCfs)
-      throws ReplicationException, IOException {
-    throw new NotImplementedException("appendReplicationPeerTableCFs not supported in ThriftAdmin");
-
-  }
-
-  @Override
-  public void removeReplicationPeerTableCFs(String id, Map<TableName, List<String>> tableCfs)
-      throws ReplicationException, IOException {
-    throw new NotImplementedException("removeReplicationPeerTableCFs not supported in ThriftAdmin");
-
-  }
-
-  @Override
   public List<ReplicationPeerDescription> listReplicationPeers() {
     throw new NotImplementedException("listReplicationPeers not supported in ThriftAdmin");
   }
@@ -1285,15 +1208,6 @@ public class ThriftAdmin implements Admin {
   public List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern) {
     throw new NotImplementedException("listReplicationPeers not supported in ThriftAdmin");
   }
-
-  @Override
-  public void transitReplicationPeerSyncReplicationState(String peerId,
-      SyncReplicationState state) {
-    throw new NotImplementedException(
-        "transitReplicationPeerSyncReplicationState not supported in ThriftAdmin");
-
-  }
-
   @Override
   public Future<Void> transitReplicationPeerSyncReplicationStateAsync(String peerId,
       SyncReplicationState state) {
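
The hunks above turn most of the blocking Admin calls into default methods that
delegate to their *Async counterparts and wait through
FutureUtils.get(..., getSyncWaitTimeout(), TimeUnit.MILLISECONDS), while
snapshotAsync now hands back a Future<Void>. Below is a minimal client-side
sketch of using the reshaped API; it is only an illustration that assumes a
reachable cluster on the default configuration, and the table and snapshot
names are invented for the example.

    import java.util.concurrent.Future;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class AdminUsageSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("demo_table");   // made-up table name
          // Fire the snapshot without blocking; snapshotAsync now returns a Future<Void>.
          Future<Void> pending = admin.snapshotAsync(
              new SnapshotDescription("demo_snapshot", table, SnapshotType.FLUSH));
          // ... do other work, then wait for the snapshot to finish.
          pending.get();
          // The blocking variant is a default method that calls cloneSnapshotAsync and
          // waits up to getSyncWaitTimeout() milliseconds.
          admin.cloneSnapshot("demo_snapshot", TableName.valueOf("demo_table_clone"));
        }
      }
    }

The same pattern applies to the replication-peer methods: the synchronous
add/remove/enable/disable calls are now thin wrappers over the *Async variants,
so a caller that wants fine-grained control can hold the Future instead.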


[hbase] 06/24: HBASE-21999 [DEBUG] Exit if git returns empty revision!

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit a3c36f808c19e08c1320c606696634eec3e27501
Author: stack <st...@apache.org>
AuthorDate: Wed Mar 6 16:44:50 2019 -0800

    HBASE-21999 [DEBUG] Exit if git returns empty revision!
---
 hbase-common/src/saveVersion.sh | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/hbase-common/src/saveVersion.sh b/hbase-common/src/saveVersion.sh
index 730224f..97fe6b4 100644
--- a/hbase-common/src/saveVersion.sh
+++ b/hbase-common/src/saveVersion.sh
@@ -18,6 +18,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+set -e
+
 unset LANG
 unset LC_CTYPE
 
@@ -45,10 +47,12 @@ else
   revision="Unknown"
   url="file://$cwd"
 fi
-which md5sum > /dev/null
-if [ "$?" != "0" ] ; then
-  which md5 > /dev/null
-  if [ "$?" != "0" ] ; then
+if [ -z $revision ]; then
+  echo "$revision is empty!"
+  exit 1
+fi
+if ! [  -x "$(command -v md5sum)" ]; then
+  if ! [ -x "$(command -v md5)" ]; then
     srcChecksum="Unknown"
   else
     srcChecksum=`find hbase-*/src/main/ | grep -e "\.java" -e "\.proto" | LC_ALL=C sort | xargs md5 | md5 | cut -d ' ' -f 1`


[hbase] 19/24: HBASE-21987 Simplify RSGroupInfoManagerImpl#flushConfig() for offline mode

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 0850b4bff4dc1c9baf8c9fa269ccdb4d29eb2c58
Author: Xiang Li <li...@freewheel.tv>
AuthorDate: Tue Mar 5 08:00:38 2019 +0000

    HBASE-21987 Simplify RSGroupInfoManagerImpl#flushConfig() for offline mode
    
    Signed-off-by: Xu Cang <xu...@apache.org>
---
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java      | 29 +++++++++++++++++-----
 1 file changed, 23 insertions(+), 6 deletions(-)

diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index c89bba8..17d6481 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -490,20 +490,37 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
     Map<TableName, String> newTableMap;
 
     // For offline mode persistence is still unavailable
-    // We're refreshing in-memory state but only for default servers
+    // We're refreshing in-memory state but only for servers in default group
     if (!isOnline()) {
-      Map<String, RSGroupInfo> m = Maps.newHashMap(rsGroupMap);
-      RSGroupInfo oldDefaultGroup = m.remove(RSGroupInfo.DEFAULT_GROUP);
+      if (newGroupMap == this.rsGroupMap) {
+        // When newGroupMap is this.rsGroupMap itself,
+        // do not need to check default group and other groups as followed
+        return;
+      }
+
+      Map<String, RSGroupInfo> oldGroupMap = Maps.newHashMap(rsGroupMap);
+      RSGroupInfo oldDefaultGroup = oldGroupMap.remove(RSGroupInfo.DEFAULT_GROUP);
       RSGroupInfo newDefaultGroup = newGroupMap.remove(RSGroupInfo.DEFAULT_GROUP);
-      if (!m.equals(newGroupMap) ||
-        !oldDefaultGroup.getTables().equals(newDefaultGroup.getTables())) {
-        throw new IOException("Only default servers can be updated during offline mode");
+      if (!oldGroupMap.equals(newGroupMap) /* compare both tables and servers in other groups */ ||
+          !oldDefaultGroup.getTables().equals(newDefaultGroup.getTables())
+          /* compare tables in default group */) {
+        throw new IOException("Only servers in default group can be updated during offline mode");
       }
+
+      // Restore newGroupMap by putting its default group back
       newGroupMap.put(RSGroupInfo.DEFAULT_GROUP, newDefaultGroup);
+
+      // Refresh rsGroupMap
+      // according to the inputted newGroupMap (an updated copy of rsGroupMap)
       rsGroupMap = newGroupMap;
+
+      // Do not need to update tableMap
+      // because only the update on servers in default group is allowed above,
+      // or IOException will be thrown
       return;
     }
 
+    /* For online mode, persist to Zookeeper */
     newTableMap = flushConfigTable(newGroupMap);
 
     // Make changes visible after having been persisted to the source of truth
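
As a self-contained illustration of the rule the block above enforces (only the
default group's server list may change while the manager is offline), here is a
small sketch using plain Java collections; GroupSketch is a hypothetical
stand-in for RSGroupInfo, not the real class.

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Objects;
    import java.util.Set;

    public class OfflineFlushRuleSketch {
      static final String DEFAULT_GROUP = "default";

      // Hypothetical stand-in for RSGroupInfo: a group is its servers plus its tables.
      static final class GroupSketch {
        final Set<String> servers;
        final Set<String> tables;
        GroupSketch(Set<String> servers, Set<String> tables) {
          this.servers = servers;
          this.tables = tables;
        }
        @Override
        public boolean equals(Object o) {
          if (!(o instanceof GroupSketch)) {
            return false;
          }
          GroupSketch other = (GroupSketch) o;
          return servers.equals(other.servers) && tables.equals(other.tables);
        }
        @Override
        public int hashCode() {
          return Objects.hash(servers, tables);
        }
      }

      static void checkOfflineUpdate(Map<String, GroupSketch> oldGroups,
          Map<String, GroupSketch> newGroups) throws IOException {
        Map<String, GroupSketch> oldOthers = new HashMap<>(oldGroups);
        Map<String, GroupSketch> newOthers = new HashMap<>(newGroups);
        GroupSketch oldDefault = oldOthers.remove(DEFAULT_GROUP);
        GroupSketch newDefault = newOthers.remove(DEFAULT_GROUP);
        // Non-default groups must be untouched (servers and tables), and the default
        // group's tables must be unchanged; only its servers may differ offline.
        if (!oldOthers.equals(newOthers) || !oldDefault.tables.equals(newDefault.tables)) {
          throw new IOException(
            "Only servers in default group can be updated during offline mode");
        }
      }
    }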


[hbase] 20/24: HBASE-21959 - CompactionTool should close the store it uses for compacting files, in order to properly archive compacted files.

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit f1e3e60e5a0856b9f7e81f03023f8b6a6311ae85
Author: Wellington Chevreuil <we...@gmail.com>
AuthorDate: Tue Feb 26 13:36:05 2019 +0000

    HBASE-21959 - CompactionTool should close the store it uses for compacting files, in order to properly archive compacted files.
    
    Signed-off-by: Xu Cang <xu...@apache.org>
---
 .../hadoop/hbase/regionserver/CompactionTool.java  |   2 +
 .../hbase/regionserver/TestCompactionTool.java     | 106 +++++++++++++++++++++
 2 files changed, 108 insertions(+)

diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
index 78db6fc..18a5987 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
@@ -173,6 +173,8 @@ public class CompactionTool extends Configured implements Tool {
           }
         }
       } while (store.needsCompaction() && !compactOnce);
+      //We need to close the store properly, to make sure it will archive compacted files
+      store.close();
     }
 
     /**
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java
new file mode 100644
index 0000000..b1a5b7a
--- /dev/null
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.File;
+import java.io.FileOutputStream;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MediumTests.class, RegionServerTests.class })
+public class TestCompactionTool {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestCompactionTool.class);
+
+  private final HBaseTestingUtility testUtil = new HBaseTestingUtility();
+
+  private HRegion region;
+  private final static byte[] qualifier = Bytes.toBytes("qf");
+  private Path rootDir;
+  private final TableName tableName = TableName.valueOf(getClass().getSimpleName());
+
+  @Before
+  public void setUp() throws Exception {
+    this.testUtil.startMiniCluster();
+    testUtil.createTable(tableName, HBaseTestingUtility.fam1);
+    String defaultFS = testUtil.getMiniHBaseCluster().getConfiguration().get("fs.defaultFS");
+    Configuration config = HBaseConfiguration.create();
+    config.set("fs.defaultFS", defaultFS);
+    String configPath = this.getClass().getClassLoader()
+      .getResource("hbase-site.xml").getFile();
+    config.writeXml(new FileOutputStream(new File(configPath)));
+    rootDir = testUtil.getDefaultRootDirPath();
+    this.region = testUtil.getMiniHBaseCluster().getRegions(tableName).get(0);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    this.testUtil.shutdownMiniCluster();
+    testUtil.cleanupTestDir();
+  }
+
+  @Test
+  public void testCompactedFilesArchived() throws Exception {
+    for (int i = 0; i < 10; i++) {
+      this.putAndFlush(i);
+    }
+    HStore store = region.getStore(HBaseTestingUtility.fam1);
+    assertEquals(10, store.getStorefilesCount());
+    Path tableDir = FSUtils.getTableDir(rootDir, region.getRegionInfo().getTable());
+    FileSystem fs = store.getFileSystem();
+    String storePath = tableDir + "/" + region.getRegionInfo().getEncodedName() + "/"
+      + Bytes.toString(HBaseTestingUtility.fam1);
+    FileStatus[] regionDirFiles = fs.listStatus(new Path(storePath));
+    assertEquals(10, regionDirFiles.length);
+    int result = ToolRunner.run(HBaseConfiguration.create(), new CompactionTool(),
+      new String[]{"-compactOnce", "-major", storePath});
+    assertEquals(0, result);
+    regionDirFiles = fs.listStatus(new Path(storePath));
+    assertEquals(1, regionDirFiles.length);
+  }
+
+  private void putAndFlush(int key) throws Exception {
+    Put put = new Put(Bytes.toBytes(key));
+    put.addColumn(HBaseTestingUtility.fam1, qualifier, Bytes.toBytes("val" + key));
+    region.put(put);
+    region.flush(true);
+  }
+
+}
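For context, the test drives the tool through ToolRunner exactly as an operator would. A minimal sketch of a one-shot major compaction of a single store directory could look like the following; the class name and the store path are placeholders, not part of the patch:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.regionserver.CompactionTool;
    import org.apache.hadoop.util.ToolRunner;

    public class RunCompactionToolOnce {
      public static void main(String[] args) throws Exception {
        // Placeholder path: one column family directory under a region directory.
        String storePath = "hdfs://namenode:8020/hbase/data/default/t1/REGION_ENCODED_NAME/f1";
        int exitCode = ToolRunner.run(HBaseConfiguration.create(), new CompactionTool(),
            new String[] { "-compactOnce", "-major", storePath });
        System.exit(exitCode);
      }
    }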


[hbase] 18/24: HBASE-21135 Build fails on windows as it fails to parse windows path during license check

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit a6b94d8b61e6e495f79990b388bc20d72fc1cce1
Author: Nihal Jain <ni...@gmail.com>
AuthorDate: Fri Aug 31 16:01:01 2018 +0530

    HBASE-21135 Build fails on windows as it fails to parse windows path during license check
    
    Signed-off-by: Mike Drob <md...@apache.org>
    Signed-off-by: Sean Busbey <bu...@apache.org>
---
 pom.xml | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/pom.xml b/pom.xml
index 86d3d26..5498808 100755
--- a/pom.xml
+++ b/pom.xml
@@ -1027,6 +1027,26 @@
           </execution>
         </executions>
       </plugin>
+      <!-- sets where to find the generated LICENSE files -->
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>create-license-file-path-property</id>
+            <goals>
+              <goal>regex-property</goal>
+            </goals>
+            <configuration>
+              <name>license.aggregate.path</name>
+              <value>${project.build.directory}/maven-shared-archive-resources/META-INF/LICENSE</value>
+              <regex>\\</regex>
+              <replacement>/</replacement>
+              <failIfNoMatch>false</failIfNoMatch>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-enforcer-plugin</artifactId>
@@ -1468,10 +1488,6 @@
     <license.bundles.bootstrap>false</license.bundles.bootstrap>
     <!-- modules that include jquery in their source tree should set true -->
     <license.bundles.jquery>false</license.bundles.jquery>
-    <!-- where to find the generated LICENSE files -->
-    <license.aggregate.path>
-      ${project.build.directory}/maven-shared-archive-resources/META-INF/LICENSE
-    </license.aggregate.path>
     <tar.name>${project.build.finalName}.tar.gz</tar.name>
     <maven.build.timestamp.format>
       yyyy-MM-dd'T'HH:mm
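The build-helper execution added above rewrites the Windows backslashes in ${project.build.directory} to forward slashes before the value is stored in license.aggregate.path, so the license check can parse the path on Windows. A small self-contained Java sketch of the equivalent normalization (illustrative only; the class name and path are made up):

    public class LicensePathNormalizeSketch {
      public static void main(String[] args) {
        String windowsPath =
            "C:\\hbase\\target\\maven-shared-archive-resources\\META-INF\\LICENSE";
        // The regex "\\\\" matches a single backslash, mirroring <regex>\\</regex> above.
        String normalized = windowsPath.replaceAll("\\\\", "/");
        System.out.println(normalized);
        // -> C:/hbase/target/maven-shared-archive-resources/META-INF/LICENSE
      }
    }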


[hbase] 24/24: HBASE-22016 Rewrite the block reading methods by using hbase.nio.ByteBuff

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 6d953350c01c4f02b70b84804664d42183488ef2
Author: huzheng <op...@gmail.com>
AuthorDate: Fri Mar 8 16:46:06 2019 +0800

    HBASE-22016 Rewrite the block reading methods by using hbase.nio.ByteBuff
---
 .../apache/hadoop/hbase/io/hfile/BlockIOUtils.java | 223 ++++++++++++++
 .../apache/hadoop/hbase/io/hfile/HFileBlock.java   | 337 ++++++++-------------
 ...ckPositionalRead.java => TestBlockIOUtils.java} | 122 ++++++--
 .../apache/hadoop/hbase/io/hfile/TestChecksum.java |  14 +-
 4 files changed, 453 insertions(+), 243 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockIOUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockIOUtils.java
new file mode 100644
index 0000000..dbd5b2e
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockIOUtils.java
@@ -0,0 +1,223 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io.hfile;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.fs.ByteBufferReadable;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+class BlockIOUtils {
+
+  static boolean isByteBufferReadable(FSDataInputStream is) {
+    InputStream cur = is.getWrappedStream();
+    for (;;) {
+      if ((cur instanceof FSDataInputStream)) {
+        cur = ((FSDataInputStream) cur).getWrappedStream();
+      } else {
+        break;
+      }
+    }
+    return cur instanceof ByteBufferReadable;
+  }
+
+  /**
+   * Read length bytes into ByteBuffers directly.
+   * @param buf the destination {@link ByteBuff}
+   * @param dis the HDFS input stream which implement the ByteBufferReadable interface.
+   * @param length bytes to read.
+   * @throws IOException exception to throw if any error happen
+   */
+  static void readFully(ByteBuff buf, FSDataInputStream dis, int length) throws IOException {
+    if (!isByteBufferReadable(dis)) {
+      // If InputStream does not support the ByteBuffer read, just read to heap and copy bytes to
+      // the destination ByteBuff.
+      byte[] heapBuf = new byte[length];
+      IOUtils.readFully(dis, heapBuf, 0, length);
+      copyToByteBuff(heapBuf, 0, length, buf);
+      return;
+    }
+    ByteBuffer[] buffers = buf.nioByteBuffers();
+    int remain = length;
+    int idx = 0;
+    ByteBuffer cur = buffers[idx];
+    while (remain > 0) {
+      while (!cur.hasRemaining()) {
+        if (++idx >= buffers.length) {
+          throw new IOException(
+              "Not enough ByteBuffers to read the reminding " + remain + " " + "bytes");
+        }
+        cur = buffers[idx];
+      }
+      cur.limit(cur.position() + Math.min(remain, cur.remaining()));
+      int bytesRead = dis.read(cur);
+      if (bytesRead < 0) {
+        throw new IOException(
+            "Premature EOF from inputStream, but still need " + remain + " " + "bytes");
+      }
+      remain -= bytesRead;
+    }
+  }
+
+  /**
+   * Read from an input stream at least <code>necessaryLen</code> and if possible,
+   * <code>extraLen</code> also if available. Analogous to
+   * {@link IOUtils#readFully(InputStream, byte[], int, int)}, but specifies a number of "extra"
+   * bytes to also optionally read.
+   * @param in the input stream to read from
+   * @param buf the buffer to read into
+   * @param bufOffset the destination offset in the buffer
+   * @param necessaryLen the number of bytes that are absolutely necessary to read
+   * @param extraLen the number of extra bytes that would be nice to read
+   * @return true if succeeded reading the extra bytes
+   * @throws IOException if failed to read the necessary bytes
+   */
+  private static boolean readWithExtraOnHeap(InputStream in, byte[] buf, int bufOffset,
+      int necessaryLen, int extraLen) throws IOException {
+    int bytesRemaining = necessaryLen + extraLen;
+    while (bytesRemaining > 0) {
+      int ret = in.read(buf, bufOffset, bytesRemaining);
+      if (ret < 0) {
+        if (bytesRemaining <= extraLen) {
+          // We could not read the "extra data", but that is OK.
+          break;
+        }
+        throw new IOException("Premature EOF from inputStream (read " + "returned " + ret
+            + ", was trying to read " + necessaryLen + " necessary bytes and " + extraLen
+            + " extra bytes, " + "successfully read " + (necessaryLen + extraLen - bytesRemaining));
+      }
+      bufOffset += ret;
+      bytesRemaining -= ret;
+    }
+    return bytesRemaining <= 0;
+  }
+
+  /**
+   * Read bytes into ByteBuffers directly; the buffers will contain either necessaryLen + extraLen
+   * bytes or only necessaryLen bytes, depending on how many bytes the last read returned.
+   * @param buf the destination {@link ByteBuff}.
+   * @param dis input stream to read.
+   * @param necessaryLen bytes which we must read
+   * @param extraLen bytes which we may read
+   * @return true if the extraLen bytes were read in addition to the necessaryLen bytes, false
+   *         otherwise (the extraLen bytes have not been read yet).
+   * @throws IOException if failed to read the necessary bytes.
+   */
+  static boolean readWithExtra(ByteBuff buf, FSDataInputStream dis, int necessaryLen, int extraLen)
+      throws IOException {
+    if (!isByteBufferReadable(dis)) {
+      // If InputStream does not support the ByteBuffer read, just read to heap and copy bytes to
+      // the destination ByteBuff.
+      byte[] heapBuf = new byte[necessaryLen + extraLen];
+      boolean ret = readWithExtraOnHeap(dis, heapBuf, 0, necessaryLen, extraLen);
+      copyToByteBuff(heapBuf, 0, heapBuf.length, buf);
+      return ret;
+    }
+    ByteBuffer[] buffers = buf.nioByteBuffers();
+    int bytesRead = 0;
+    int remain = necessaryLen + extraLen;
+    int idx = 0;
+    ByteBuffer cur = buffers[idx];
+    while (bytesRead < necessaryLen) {
+      while (!cur.hasRemaining()) {
+        if (++idx >= buffers.length) {
+          throw new IOException("Not enough ByteBuffers to read the reminding " + remain + "bytes");
+        }
+        cur = buffers[idx];
+      }
+      cur.limit(cur.position() + Math.min(remain, cur.remaining()));
+      int ret = dis.read(cur);
+      if (ret < 0) {
+        throw new IOException("Premature EOF from inputStream (read returned " + ret
+            + ", was trying to read " + necessaryLen + " necessary bytes and " + extraLen
+            + " extra bytes, successfully read " + bytesRead);
+      }
+      bytesRead += ret;
+      remain -= ret;
+    }
+    return (extraLen > 0) && (bytesRead == necessaryLen + extraLen);
+  }
+
+  /**
+   * Read from an input stream at least <code>necessaryLen</code> and if possible,
+   * <code>extraLen</code> also if available. Analogous to
+   * {@link IOUtils#readFully(InputStream, byte[], int, int)}, but uses positional read and
+   * specifies a number of "extra" bytes that would be desirable but not absolutely necessary to
+   * read.
+   * @param buff ByteBuff to read into.
+   * @param dis the input stream to read from
+   * @param position the position within the stream from which to start reading
+   * @param necessaryLen the number of bytes that are absolutely necessary to read
+   * @param extraLen the number of extra bytes that would be nice to read
+   * @return true if and only if extraLen is > 0 and reading those extra bytes was successful
+   * @throws IOException if failed to read the necessary bytes
+   */
+  static boolean preadWithExtra(ByteBuff buff, FSDataInputStream dis, long position,
+      int necessaryLen, int extraLen) throws IOException {
+    int remain = necessaryLen + extraLen;
+    byte[] buf = new byte[remain];
+    int bytesRead = 0;
+    while (bytesRead < necessaryLen) {
+      int ret = dis.read(position + bytesRead, buf, bytesRead, remain);
+      if (ret < 0) {
+        throw new IOException("Premature EOF from inputStream (positional read returned " + ret
+            + ", was trying to read " + necessaryLen + " necessary bytes and " + extraLen
+            + " extra bytes, successfully read " + bytesRead);
+      }
+      bytesRead += ret;
+      remain -= ret;
+    }
+    // Copy the bytes from on-heap bytes[] to ByteBuffer[] now, and after resolving HDFS-3246, we
+    // will read the bytes to ByteBuffer[] directly without allocating any on-heap byte[].
+    // TODO I keep the bytes copy here, because I want to abstract the ByteBuffer[]
+    // preadWithExtra method for the upper layer, only need to refactor this method if the
+    // ByteBuffer pread is OK.
+    copyToByteBuff(buf, 0, bytesRead, buff);
+    return (extraLen > 0) && (bytesRead == necessaryLen + extraLen);
+  }
+
+  private static int copyToByteBuff(byte[] buf, int offset, int len, ByteBuff out)
+      throws IOException {
+    if (offset < 0 || len < 0 || offset + len > buf.length) {
+      throw new IOException("Invalid offset=" + offset + " and len=" + len + ", cap=" + buf.length);
+    }
+    ByteBuffer[] buffers = out.nioByteBuffers();
+    int idx = 0, remain = len, copyLen;
+    ByteBuffer cur = buffers[idx];
+    while (remain > 0) {
+      while (!cur.hasRemaining()) {
+        if (++idx >= buffers.length) {
+          throw new IOException("Not enough ByteBuffers to read the reminding " + remain + "bytes");
+        }
+        cur = buffers[idx];
+      }
+      copyLen = Math.min(cur.remaining(), remain);
+      cur.put(buf, offset, copyLen);
+      remain -= copyLen;
+      offset += copyLen;
+    }
+    return len;
+  }
+}
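A minimal usage sketch of the new helpers, assuming the caller sits in the same org.apache.hadoop.hbase.io.hfile package (BlockIOUtils is package-private); the file path, buffer sizes and class name are made up:

    package org.apache.hadoop.hbase.io.hfile;   // BlockIOUtils is package-private

    import java.nio.ByteBuffer;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.nio.ByteBuff;
    import org.apache.hadoop.hbase.nio.MultiByteBuff;

    class BlockIOUtilsSketch {
      // Read one block's worth of bytes into a ByteBuff that spans two ByteBuffers.
      static ByteBuff readOneBlock(Configuration conf) throws Exception {
        FileSystem fs = FileSystem.get(conf);
        try (FSDataInputStream in = fs.open(new Path("/hbase/some-hfile"))) {
          ByteBuff block = new MultiByteBuff(ByteBuffer.allocate(32 * 1024),
              ByteBuffer.allocate(32 * 1024));
          // Falls back to a temporary on-heap byte[] copy unless the wrapped
          // stream implements ByteBufferReadable.
          BlockIOUtils.readFully(block, in, 64 * 1024);
          block.rewind();
          return block;
        }
      }
    }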
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index 91e63fd..4773678 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -21,7 +21,6 @@ import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
-import java.io.InputStream;
 import java.nio.ByteBuffer;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.Lock;
@@ -51,7 +50,6 @@ import org.apache.hadoop.hbase.nio.SingleByteBuff;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.ClassSize;
-import org.apache.hadoop.io.IOUtils;
 
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
@@ -280,9 +278,7 @@ public class HFileBlock implements Cacheable {
       boolean usesChecksum = buf.get() == (byte) 1;
       long offset = buf.getLong();
       int nextBlockOnDiskSize = buf.getInt();
-      HFileBlock hFileBlock =
-          new HFileBlock(newByteBuff, usesChecksum, memType, offset, nextBlockOnDiskSize, null);
-      return hFileBlock;
+      return new HFileBlock(newByteBuff, usesChecksum, memType, offset, nextBlockOnDiskSize, null);
     }
 
     @Override
@@ -315,9 +311,9 @@ public class HFileBlock implements Cacheable {
    * param.
    */
   private HFileBlock(HFileBlock that, boolean bufCopy) {
-    init(that.blockType, that.onDiskSizeWithoutHeader,
-        that.uncompressedSizeWithoutHeader, that.prevBlockOffset,
-        that.offset, that.onDiskDataSizeWithHeader, that.nextBlockOnDiskSize, that.fileContext);
+    init(that.blockType, that.onDiskSizeWithoutHeader, that.uncompressedSizeWithoutHeader,
+      that.prevBlockOffset, that.offset, that.onDiskDataSizeWithHeader, that.nextBlockOnDiskSize,
+      that.fileContext);
     if (bufCopy) {
       this.buf = new SingleByteBuff(ByteBuffer.wrap(that.buf.toBytes(0, that.buf.limit())));
     } else {
@@ -331,6 +327,7 @@ public class HFileBlock implements Cacheable {
    * and is sitting in a byte buffer and we want to stuff the block into cache.
    *
    * <p>TODO: The caller presumes no checksumming
+   * <p>TODO: Could the HFile block writer also be off-heap?</p>
    * required of this block instance since going into cache; checksum already verified on
    * underlying block data pulled in from filesystem. Is that correct? What if cache is SSD?
    *
@@ -349,8 +346,8 @@ public class HFileBlock implements Cacheable {
       int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuffer b, boolean fillHeader,
       long offset, final int nextBlockOnDiskSize, int onDiskDataSizeWithHeader,
       HFileContext fileContext) {
-    init(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader,
-        prevBlockOffset, offset, onDiskDataSizeWithHeader, nextBlockOnDiskSize, fileContext);
+    init(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, prevBlockOffset, offset,
+      onDiskDataSizeWithHeader, nextBlockOnDiskSize, fileContext);
     this.buf = new SingleByteBuff(b);
     if (fillHeader) {
       overwriteHeader();
@@ -366,7 +363,8 @@ public class HFileBlock implements Cacheable {
    * @param buf Has header, content, and trailing checksums if present.
    */
   HFileBlock(ByteBuff buf, boolean usesHBaseChecksum, MemoryType memType, final long offset,
-      final int nextBlockOnDiskSize, HFileContext fileContext) throws IOException {
+      final int nextBlockOnDiskSize, HFileContext fileContext)
+      throws IOException {
     buf.rewind();
     final BlockType blockType = BlockType.read(buf);
     final int onDiskSizeWithoutHeader = buf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX);
@@ -394,8 +392,8 @@ public class HFileBlock implements Cacheable {
     }
     fileContext = fileContextBuilder.build();
     assert usesHBaseChecksum == fileContext.isUseHBaseChecksum();
-    init(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader,
-        prevBlockOffset, offset, onDiskDataSizeWithHeader, nextBlockOnDiskSize, fileContext);
+    init(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, prevBlockOffset, offset,
+      onDiskDataSizeWithHeader, nextBlockOnDiskSize, fileContext);
     this.memType = memType;
     this.offset = offset;
     this.buf = buf;
@@ -406,9 +404,8 @@ public class HFileBlock implements Cacheable {
    * Called from constructors.
    */
   private void init(BlockType blockType, int onDiskSizeWithoutHeader,
-      int uncompressedSizeWithoutHeader, long prevBlockOffset,
-      long offset, int onDiskDataSizeWithHeader, final int nextBlockOnDiskSize,
-      HFileContext fileContext) {
+      int uncompressedSizeWithoutHeader, long prevBlockOffset, long offset,
+      int onDiskDataSizeWithHeader, final int nextBlockOnDiskSize, HFileContext fileContext) {
     this.blockType = blockType;
     this.onDiskSizeWithoutHeader = onDiskSizeWithoutHeader;
     this.uncompressedSizeWithoutHeader = uncompressedSizeWithoutHeader;
@@ -425,10 +422,9 @@ public class HFileBlock implements Cacheable {
    * @param verifyChecksum true if checksum verification is in use.
    * @return Size of the block with header included.
    */
-  private static int getOnDiskSizeWithHeader(final ByteBuffer headerBuf,
+  private static int getOnDiskSizeWithHeader(final ByteBuff headerBuf,
       boolean verifyChecksum) {
-    return headerBuf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX) +
-      headerSize(verifyChecksum);
+    return headerBuf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX) + headerSize(verifyChecksum);
   }
 
   /**
@@ -651,9 +647,10 @@ public class HFileBlock implements Cacheable {
     ByteBuff dup = this.buf.duplicate();
     dup.position(this.headerSize());
     dup = dup.slice();
+
     ctx.prepareDecoding(unpacked.getOnDiskSizeWithoutHeader(),
-      unpacked.getUncompressedSizeWithoutHeader(), unpacked.getBufferWithoutHeader(),
-      dup);
+      unpacked.getUncompressedSizeWithoutHeader(), unpacked.getBufferWithoutHeader(), dup);
+
     return unpacked;
   }
 
@@ -667,15 +664,14 @@ public class HFileBlock implements Cacheable {
     int headerSize = headerSize();
     int capacityNeeded = headerSize + uncompressedSizeWithoutHeader + cksumBytes;
 
-    // TODO we need consider allocating offheap here?
-    ByteBuffer newBuf = ByteBuffer.allocate(capacityNeeded);
+    ByteBuff newBuf = new SingleByteBuff(ByteBuffer.allocate(capacityNeeded));
 
     // Copy header bytes into newBuf.
     // newBuf is HBB so no issue in calling array()
     buf.position(0);
-    buf.get(newBuf.array(), newBuf.arrayOffset(), headerSize);
+    newBuf.put(0, buf, 0, headerSize);
 
-    buf = new SingleByteBuff(newBuf);
+    buf = newBuf;
     // set limit to exclude next block's header
     buf.limit(headerSize + uncompressedSizeWithoutHeader + cksumBytes);
   }
@@ -692,17 +688,6 @@ public class HFileBlock implements Cacheable {
     return bufCapacity == expectedCapacity || bufCapacity == expectedCapacity + headerSize;
   }
 
-  /** An additional sanity-check in case no compression or encryption is being used. */
-  @VisibleForTesting
-  void sanityCheckUncompressedSize() throws IOException {
-    if (onDiskSizeWithoutHeader != uncompressedSizeWithoutHeader + totalChecksumBytes()) {
-      throw new IOException("Using no compression but "
-          + "onDiskSizeWithoutHeader=" + onDiskSizeWithoutHeader + ", "
-          + "uncompressedSizeWithoutHeader=" + uncompressedSizeWithoutHeader
-          + ", numChecksumbytes=" + totalChecksumBytes());
-    }
-  }
-
   /**
    * Cannot be {@link #UNSET}. Must be a legitimate value. Used re-making the {@link BlockCacheKey} when
    * block is returned to the cache.
@@ -748,82 +733,6 @@ public class HFileBlock implements Cacheable {
   }
 
   /**
-   * Read from an input stream at least <code>necessaryLen</code> and if possible,
-   * <code>extraLen</code> also if available. Analogous to
-   * {@link IOUtils#readFully(InputStream, byte[], int, int)}, but specifies a
-   * number of "extra" bytes to also optionally read.
-   *
-   * @param in the input stream to read from
-   * @param buf the buffer to read into
-   * @param bufOffset the destination offset in the buffer
-   * @param necessaryLen the number of bytes that are absolutely necessary to read
-   * @param extraLen the number of extra bytes that would be nice to read
-   * @return true if succeeded reading the extra bytes
-   * @throws IOException if failed to read the necessary bytes
-   */
-  static boolean readWithExtra(InputStream in, byte[] buf,
-      int bufOffset, int necessaryLen, int extraLen) throws IOException {
-    int bytesRemaining = necessaryLen + extraLen;
-    while (bytesRemaining > 0) {
-      int ret = in.read(buf, bufOffset, bytesRemaining);
-      if (ret == -1 && bytesRemaining <= extraLen) {
-        // We could not read the "extra data", but that is OK.
-        break;
-      }
-      if (ret < 0) {
-        throw new IOException("Premature EOF from inputStream (read "
-            + "returned " + ret + ", was trying to read " + necessaryLen
-            + " necessary bytes and " + extraLen + " extra bytes, "
-            + "successfully read "
-            + (necessaryLen + extraLen - bytesRemaining));
-      }
-      bufOffset += ret;
-      bytesRemaining -= ret;
-    }
-    return bytesRemaining <= 0;
-  }
-
-  /**
-   * Read from an input stream at least <code>necessaryLen</code> and if possible,
-   * <code>extraLen</code> also if available. Analogous to
-   * {@link IOUtils#readFully(InputStream, byte[], int, int)}, but uses
-   * positional read and specifies a number of "extra" bytes that would be
-   * desirable but not absolutely necessary to read.
-   *
-   * @param in the input stream to read from
-   * @param position the position within the stream from which to start reading
-   * @param buf the buffer to read into
-   * @param bufOffset the destination offset in the buffer
-   * @param necessaryLen the number of bytes that are absolutely necessary to
-   *     read
-   * @param extraLen the number of extra bytes that would be nice to read
-   * @return true if and only if extraLen is > 0 and reading those extra bytes
-   *     was successful
-   * @throws IOException if failed to read the necessary bytes
-   */
-  @VisibleForTesting
-  static boolean positionalReadWithExtra(FSDataInputStream in,
-      long position, byte[] buf, int bufOffset, int necessaryLen, int extraLen)
-      throws IOException {
-    int bytesRemaining = necessaryLen + extraLen;
-    int bytesRead = 0;
-    while (bytesRead < necessaryLen) {
-      int ret = in.read(position, buf, bufOffset, bytesRemaining);
-      if (ret < 0) {
-        throw new IOException("Premature EOF from inputStream (positional read "
-            + "returned " + ret + ", was trying to read " + necessaryLen
-            + " necessary bytes and " + extraLen + " extra bytes, "
-            + "successfully read " + bytesRead);
-      }
-      position += ret;
-      bufOffset += ret;
-      bytesRemaining -= ret;
-      bytesRead += ret;
-    }
-    return bytesRead != necessaryLen && bytesRemaining <= 0;
-  }
-
-  /**
    * Unified version 2 {@link HFile} block writer. The intended usage pattern
    * is as follows:
    * <ol>
@@ -988,18 +897,6 @@ public class HFileBlock implements Cacheable {
     }
 
     /**
-     * Returns the stream for the user to write to. The block writer takes care
-     * of handling compression and buffering for caching on write. Can only be
-     * called in the "writing" state.
-     *
-     * @return the data output stream for the user to write to
-     */
-    DataOutputStream getUserDataStream() {
-      expectState(State.WRITING);
-      return userDataStream;
-    }
-
-    /**
      * Transitions the block writer from the "writing" state to the "block
      * ready" state.  Does nothing if a block is already finished.
      */
@@ -1261,11 +1158,9 @@ public class HFileBlock implements Cacheable {
     }
 
     /**
-     * Clones the header followed by the on-disk (compressed/encoded/encrypted) data. This is
-     * needed for storing packed blocks in the block cache. Expects calling semantics identical to
-     * {@link #getUncompressedBufferWithHeader()}. Returns only the header and data,
-     * Does not include checksum data.
-     *
+     * Clones the header followed by the on-disk (compressed/encoded/encrypted) data. This is needed
+     * for storing packed blocks in the block cache. Returns only the header and data; does not
+     * include checksum data.
      * @return Returns a copy of block bytes for caching on write
      */
     private ByteBuffer cloneOnDiskBufferWithHeader() {
@@ -1321,11 +1216,10 @@ public class HFileBlock implements Cacheable {
                                 .withIncludesMvcc(fileContext.isIncludesMvcc())
                                 .withIncludesTags(fileContext.isIncludesTags())
                                 .build();
-       return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
+      return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
           getUncompressedSizeWithoutHeader(), prevOffset,
-          cacheConf.shouldCacheCompressed(blockType.getCategory())?
-            cloneOnDiskBufferWithHeader() :
-            cloneUncompressedBufferWithHeader(),
+          cacheConf.shouldCacheCompressed(blockType.getCategory()) ? cloneOnDiskBufferWithHeader()
+              : cloneUncompressedBufferWithHeader(),
           FILL_HEADER, startOffset, UNSET,
           onDiskBlockBytesWithHeader.size() + onDiskChecksum.length, newContext);
     }
@@ -1415,8 +1309,8 @@ public class HFileBlock implements Cacheable {
    */
   private static class PrefetchedHeader {
     long offset = -1;
-    byte [] header = new byte[HConstants.HFILEBLOCK_HEADER_SIZE];
-    final ByteBuffer buf = ByteBuffer.wrap(header, 0, HConstants.HFILEBLOCK_HEADER_SIZE);
+    byte[] header = new byte[HConstants.HFILEBLOCK_HEADER_SIZE];
+    final ByteBuff buf = new SingleByteBuff(ByteBuffer.wrap(header, 0, header.length));
 
     @Override
     public String toString() {
@@ -1479,11 +1373,11 @@ public class HFileBlock implements Cacheable {
     }
 
     /**
-     * A constructor that reads files with the latest minor version.
-     * This is used by unit tests only.
+     * A constructor that reads files with the latest minor version. This is used by unit tests
+     * only.
      */
     FSReaderImpl(FSDataInputStream istream, long fileSize, HFileContext fileContext)
-    throws IOException {
+        throws IOException {
       this(new FSDataInputStreamWrapper(istream), fileSize, null, null, fileContext);
     }
 
@@ -1520,60 +1414,49 @@ public class HFileBlock implements Cacheable {
     }
 
     /**
-     * Does a positional read or a seek and read into the given buffer. Returns
-     * the on-disk size of the next block, or -1 if it could not be read/determined; e.g. EOF.
-     *
+     * Does a positional read or a seek and read into the given byte buffer. Callers must take
+     * care to call {@link ByteBuff#release()} on every exit path to deallocate the ByteBuffers,
+     * otherwise a memory leak may happen.
      * @param dest destination buffer
-     * @param destOffset offset into the destination buffer at where to put the bytes we read
      * @param size size of read
      * @param peekIntoNextBlock whether to read the next block's on-disk size
      * @param fileOffset position in the stream to read at
      * @param pread whether we should do a positional read
      * @param istream The input source of data
-     * @return the on-disk size of the next block with header size included, or
-     *         -1 if it could not be determined; if not -1, the <code>dest</code> INCLUDES the
-     *         next header
-     * @throws IOException
+     * @return true if the destination buffer includes the next block's header, false if it only
+     *         contains the current block's data without the next block's header.
+     * @throws IOException if any IO error happens.
      */
-    @VisibleForTesting
-    protected int readAtOffset(FSDataInputStream istream, byte[] dest, int destOffset, int size,
-        boolean peekIntoNextBlock, long fileOffset, boolean pread)
-        throws IOException {
-      if (peekIntoNextBlock && destOffset + size + hdrSize > dest.length) {
-        // We are asked to read the next block's header as well, but there is
-        // not enough room in the array.
-        throw new IOException("Attempted to read " + size + " bytes and " + hdrSize +
-            " bytes of next header into a " + dest.length + "-byte array at offset " + destOffset);
-      }
-
+    protected boolean readAtOffset(FSDataInputStream istream, ByteBuff dest, int size,
+        boolean peekIntoNextBlock, long fileOffset, boolean pread) throws IOException {
       if (!pread) {
         // Seek + read. Better for scanning.
         HFileUtil.seekOnMultipleSources(istream, fileOffset);
-        // TODO: do we need seek time latencies?
         long realOffset = istream.getPos();
         if (realOffset != fileOffset) {
-          throw new IOException("Tried to seek to " + fileOffset + " to " + "read " + size +
-              " bytes, but pos=" + realOffset + " after seek");
+          throw new IOException("Tried to seek to " + fileOffset + " to read " + size
+              + " bytes, but pos=" + realOffset + " after seek");
         }
-
         if (!peekIntoNextBlock) {
-          IOUtils.readFully(istream, dest, destOffset, size);
-          return -1;
+          BlockIOUtils.readFully(dest, istream, size);
+          return false;
         }
 
-        // Try to read the next block header.
-        if (!readWithExtra(istream, dest, destOffset, size, hdrSize)) {
-          return -1;
+        // Try to read the next block header
+        if (!BlockIOUtils.readWithExtra(dest, istream, size, hdrSize)) {
+          // did not read the next block header.
+          return false;
         }
       } else {
         // Positional read. Better for random reads; or when the streamLock is already locked.
         int extraSize = peekIntoNextBlock ? hdrSize : 0;
-        if (!positionalReadWithExtra(istream, fileOffset, dest, destOffset, size, extraSize)) {
-          return -1;
+        if (!BlockIOUtils.preadWithExtra(dest, istream, fileOffset, size, extraSize)) {
+          // did not read the next block header.
+          return false;
         }
       }
       assert peekIntoNextBlock;
-      return Bytes.toInt(dest, destOffset + size + BlockType.MAGIC_LENGTH) + hdrSize;
+      return true;
     }
 
     /**
@@ -1672,7 +1555,7 @@ public class HFileBlock implements Cacheable {
      * is not right.
      * @throws IOException
      */
-    private void verifyOnDiskSizeMatchesHeader(final int passedIn, final ByteBuffer headerBuf,
+    private void verifyOnDiskSizeMatchesHeader(final int passedIn, final ByteBuff headerBuf,
         final long offset, boolean verifyChecksum)
     throws IOException {
       // Assert size provided aligns with what is in the header
@@ -1691,11 +1574,11 @@ public class HFileBlock implements Cacheable {
      * we have to backup the stream because we over-read (the next block's header).
      * @see PrefetchedHeader
      * @return The cached block header or null if not found.
-     * @see #cacheNextBlockHeader(long, byte[], int, int)
+     * @see #cacheNextBlockHeader(long, ByteBuff, int, int)
      */
-    private ByteBuffer getCachedHeader(final long offset) {
+    private ByteBuff getCachedHeader(final long offset) {
       PrefetchedHeader ph = this.prefetchedHeader.get();
-      return ph != null && ph.offset == offset? ph.buf: null;
+      return ph != null && ph.offset == offset ? ph.buf : null;
     }
 
     /**
@@ -1704,13 +1587,24 @@ public class HFileBlock implements Cacheable {
      * @see PrefetchedHeader
      */
     private void cacheNextBlockHeader(final long offset,
-        final byte [] header, final int headerOffset, final int headerLength) {
+        ByteBuff onDiskBlock, int onDiskSizeWithHeader, int headerLength) {
       PrefetchedHeader ph = new PrefetchedHeader();
       ph.offset = offset;
-      System.arraycopy(header, headerOffset, ph.header, 0, headerLength);
+      onDiskBlock.get(onDiskSizeWithHeader, ph.header, 0, headerLength);
       this.prefetchedHeader.set(ph);
     }
 
+    private int getNextBlockOnDiskSize(boolean readNextHeader, ByteBuff onDiskBlock,
+        int onDiskSizeWithHeader) {
+      int nextBlockOnDiskSize = -1;
+      if (readNextHeader) {
+        nextBlockOnDiskSize =
+            onDiskBlock.getIntAfterPosition(onDiskSizeWithHeader + BlockType.MAGIC_LENGTH)
+                + hdrSize;
+      }
+      return nextBlockOnDiskSize;
+    }
+
     /**
      * Reads a version 2 block.
      *
@@ -1737,7 +1631,7 @@ public class HFileBlock implements Cacheable {
       // Try and get cached header. Will serve us in rare case where onDiskSizeWithHeaderL is -1
       // and will save us having to seek the stream backwards to reread the header we
       // read the last time through here.
-      ByteBuffer headerBuf = getCachedHeader(offset);
+      ByteBuff headerBuf = getCachedHeader(offset);
       LOG.trace("Reading {} at offset={}, pread={}, verifyChecksum={}, cachedHeader={}, " +
           "onDiskSizeWithHeader={}", this.fileContext.getHFileName(), offset, pread,
           verifyChecksum, headerBuf, onDiskSizeWithHeader);
@@ -1757,9 +1651,9 @@ public class HFileBlock implements Cacheable {
           if (LOG.isTraceEnabled()) {
             LOG.trace("Extra see to get block size!", new RuntimeException());
           }
-          headerBuf = ByteBuffer.allocate(hdrSize);
-          readAtOffset(is, headerBuf.array(), headerBuf.arrayOffset(), hdrSize, false,
-              offset, pread);
+          headerBuf = new SingleByteBuff(ByteBuffer.allocate(hdrSize));
+          readAtOffset(is, headerBuf, hdrSize, false, offset, pread);
+          headerBuf.rewind();
         }
         onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf, checksumSupport);
       }
@@ -1770,46 +1664,55 @@ public class HFileBlock implements Cacheable {
       // says where to start reading. If we have the header cached, then we don't need to read
       // it again and we can likely read from last place we left off w/o need to backup and reread
       // the header we read last time through here.
-      // TODO: Make this ByteBuffer-based. Will make it easier to go to HDFS with BBPool (offheap).
-      byte [] onDiskBlock = new byte[onDiskSizeWithHeader + hdrSize];
-      int nextBlockOnDiskSize = readAtOffset(is, onDiskBlock, preReadHeaderSize,
+      ByteBuff onDiskBlock =
+          new SingleByteBuff(ByteBuffer.allocate(onDiskSizeWithHeader + hdrSize));
+      boolean initHFileBlockSuccess = false;
+      try {
+        if (headerBuf != null) {
+          onDiskBlock.put(0, headerBuf, 0, hdrSize).position(hdrSize);
+        }
+        boolean readNextHeader = readAtOffset(is, onDiskBlock,
           onDiskSizeWithHeader - preReadHeaderSize, true, offset + preReadHeaderSize, pread);
-      if (headerBuf != null) {
-        // The header has been read when reading the previous block OR in a distinct header-only
-        // read. Copy to this block's header.
-        System.arraycopy(headerBuf.array(), headerBuf.arrayOffset(), onDiskBlock, 0, hdrSize);
-      } else {
-        headerBuf = ByteBuffer.wrap(onDiskBlock, 0, hdrSize);
-      }
-      // Do a few checks before we go instantiate HFileBlock.
-      assert onDiskSizeWithHeader > this.hdrSize;
-      verifyOnDiskSizeMatchesHeader(onDiskSizeWithHeader, headerBuf, offset, checksumSupport);
-      ByteBuff onDiskBlockByteBuff =
-          new SingleByteBuff(ByteBuffer.wrap(onDiskBlock, 0, onDiskSizeWithHeader));
-      // Verify checksum of the data before using it for building HFileBlock.
-      if (verifyChecksum && !validateChecksum(offset, onDiskBlockByteBuff, hdrSize)) {
-        return null;
-      }
-      long duration = System.currentTimeMillis() - startTime;
-      if (updateMetrics) {
-        HFile.updateReadLatency(duration, pread);
-      }
-      // The onDiskBlock will become the headerAndDataBuffer for this block.
-      // If nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already
-      // contains the header of next block, so no need to set next block's header in it.
-      HFileBlock hFileBlock = new HFileBlock(onDiskBlockByteBuff, checksumSupport,
-          MemoryType.EXCLUSIVE, offset, nextBlockOnDiskSize, fileContext);
-      // Run check on uncompressed sizings.
-      if (!fileContext.isCompressedOrEncrypted()) {
-        hFileBlock.sanityCheckUncompressed();
-      }
-      LOG.trace("Read {} in {} ns", hFileBlock, duration);
-      // Cache next block header if we read it for the next time through here.
-      if (nextBlockOnDiskSize != -1) {
-        cacheNextBlockHeader(offset + hFileBlock.getOnDiskSizeWithHeader(),
-            onDiskBlock, onDiskSizeWithHeader, hdrSize);
+        onDiskBlock.rewind(); // in case of moving position when copying a cached header
+        int nextBlockOnDiskSize =
+            getNextBlockOnDiskSize(readNextHeader, onDiskBlock, onDiskSizeWithHeader);
+        if (headerBuf == null) {
+          headerBuf = onDiskBlock.duplicate().position(0).limit(hdrSize);
+        }
+        // Do a few checks before we go instantiate HFileBlock.
+        assert onDiskSizeWithHeader > this.hdrSize;
+        verifyOnDiskSizeMatchesHeader(onDiskSizeWithHeader, headerBuf, offset, checksumSupport);
+        ByteBuff curBlock = onDiskBlock.duplicate().limit(onDiskSizeWithHeader);
+        // Verify checksum of the data before using it for building HFileBlock.
+        if (verifyChecksum && !validateChecksum(offset, curBlock, hdrSize)) {
+          return null;
+        }
+        long duration = System.currentTimeMillis() - startTime;
+        if (updateMetrics) {
+          HFile.updateReadLatency(duration, pread);
+        }
+        // The onDiskBlock will become the headerAndDataBuffer for this block.
+        // If nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already
+        // contains the header of next block, so no need to set next block's header in it.
+        HFileBlock hFileBlock = new HFileBlock(curBlock, checksumSupport, MemoryType.EXCLUSIVE,
+            offset, nextBlockOnDiskSize, fileContext);
+        // Run check on uncompressed sizings.
+        if (!fileContext.isCompressedOrEncrypted()) {
+          hFileBlock.sanityCheckUncompressed();
+        }
+        LOG.trace("Read {} in {} ns", hFileBlock, duration);
+        // Cache next block header if we read it for the next time through here.
+        if (nextBlockOnDiskSize != -1) {
+          cacheNextBlockHeader(offset + hFileBlock.getOnDiskSizeWithHeader(), onDiskBlock,
+            onDiskSizeWithHeader, hdrSize);
+        }
+        initHFileBlockSuccess = true;
+        return hFileBlock;
+      } finally {
+        if (!initHFileBlockSuccess) {
+          onDiskBlock.release();
+        }
       }
-      return hFileBlock;
     }
 
     @Override
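The rewritten readBlockDataInternal above tracks whether the HFileBlock was fully built and releases the on-disk buffer on any failure path. A stripped-down sketch of that allocate / success-flag / release-in-finally pattern (names are illustrative, not the actual reader internals):

    import java.nio.ByteBuffer;

    import org.apache.hadoop.hbase.nio.ByteBuff;
    import org.apache.hadoop.hbase.nio.SingleByteBuff;

    class ReleaseOnFailureSketch {
      // Set the success flag only once the result is fully built; release in
      // finally otherwise, so the buffer is not leaked on an early return or
      // an exception thrown before the block exists.
      static ByteBuff fillOrRelease(int size) throws Exception {
        ByteBuff buf = new SingleByteBuff(ByteBuffer.allocate(size));
        boolean success = false;
        try {
          // ... read into buf, verify checksums, build the block from it ...
          success = true;
          return buf;
        } finally {
          if (!success) {
            buf.release();
          }
        }
      }
    }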
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockPositionalRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockIOUtils.java
similarity index 54%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockPositionalRead.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockIOUtils.java
index a13c868..60180e6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockPositionalRead.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockIOUtils.java
@@ -17,33 +17,115 @@
  */
 package org.apache.hadoop.hbase.io.hfile;
 
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.*;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
+
 import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.nio.MultiByteBuff;
+import org.apache.hadoop.hbase.nio.SingleByteBuff;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.ClassRule;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.ExpectedException;
 
-/**
- * Unit test suite covering HFileBlock positional read logic.
- */
-@Category({IOTests.class, SmallTests.class})
-public class TestHFileBlockPositionalRead {
+@Category({ IOTests.class, SmallTests.class })
+public class TestBlockIOUtils {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestHFileBlockPositionalRead.class);
+      HBaseClassTestRule.forClass(TestBlockIOUtils.class);
 
   @Rule
   public ExpectedException exception = ExpectedException.none();
 
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  @Test
+  public void testIsByteBufferReadable() throws IOException {
+    FileSystem fs = TEST_UTIL.getTestFileSystem();
+    Path p = new Path(TEST_UTIL.getDataTestDirOnTestFS(), "testIsByteBufferReadable");
+    try (FSDataOutputStream out = fs.create(p)) {
+      out.writeInt(23);
+    }
+    try (FSDataInputStream is = fs.open(p)) {
+      assertFalse(BlockIOUtils.isByteBufferReadable(is));
+    }
+  }
+
+  @Test
+  public void testReadFully() throws IOException {
+    FileSystem fs = TEST_UTIL.getTestFileSystem();
+    Path p = new Path(TEST_UTIL.getDataTestDirOnTestFS(), "testReadFully");
+    String s = "hello world";
+    try (FSDataOutputStream out = fs.create(p)) {
+      out.writeBytes(s);
+    }
+    ByteBuff buf = new SingleByteBuff(ByteBuffer.allocate(11));
+    try (FSDataInputStream in = fs.open(p)) {
+      BlockIOUtils.readFully(buf, in, 11);
+    }
+    buf.rewind();
+    byte[] heapBuf = new byte[s.length()];
+    buf.get(heapBuf, 0, heapBuf.length);
+    assertArrayEquals(Bytes.toBytes(s), heapBuf);
+  }
+
+  @Test
+  public void testReadWithExtra() throws IOException {
+    FileSystem fs = TEST_UTIL.getTestFileSystem();
+    Path p = new Path(TEST_UTIL.getDataTestDirOnTestFS(), "testReadWithExtra");
+    String s = "hello world";
+    try (FSDataOutputStream out = fs.create(p)) {
+      out.writeBytes(s);
+    }
+    ByteBuff buf = new SingleByteBuff(ByteBuffer.allocate(8));
+    try (FSDataInputStream in = fs.open(p)) {
+      assertTrue(BlockIOUtils.readWithExtra(buf, in, 6, 2));
+    }
+    buf.rewind();
+    byte[] heapBuf = new byte[buf.capacity()];
+    buf.get(heapBuf, 0, heapBuf.length);
+    assertArrayEquals(Bytes.toBytes("hello wo"), heapBuf);
+
+    buf = new MultiByteBuff(ByteBuffer.allocate(4), ByteBuffer.allocate(4), ByteBuffer.allocate(4));
+    try (FSDataInputStream in = fs.open(p)) {
+      assertTrue(BlockIOUtils.readWithExtra(buf, in, 8, 3));
+    }
+    buf.rewind();
+    heapBuf = new byte[11];
+    buf.get(heapBuf, 0, heapBuf.length);
+    assertArrayEquals(Bytes.toBytes("hello world"), heapBuf);
+
+    buf.position(0).limit(12);
+    try (FSDataInputStream in = fs.open(p)) {
+      try {
+        BlockIOUtils.readWithExtra(buf, in, 12, 0);
+        fail("Should only read 11 bytes");
+      } catch (IOException e) {
+
+      }
+    }
+  }
+
   @Test
   public void testPositionalReadNoExtra() throws IOException {
     long position = 0;
@@ -52,10 +134,10 @@ public class TestHFileBlockPositionalRead {
     int extraLen = 0;
     int totalLen = necessaryLen + extraLen;
     byte[] buf = new byte[totalLen];
+    ByteBuff bb = new SingleByteBuff(ByteBuffer.wrap(buf, 0, totalLen));
     FSDataInputStream in = mock(FSDataInputStream.class);
     when(in.read(position, buf, bufOffset, totalLen)).thenReturn(totalLen);
-    boolean ret = HFileBlock.positionalReadWithExtra(in, position, buf,
-        bufOffset, necessaryLen, extraLen);
+    boolean ret = BlockIOUtils.preadWithExtra(bb, in, position, necessaryLen, extraLen);
     assertFalse("Expect false return when no extra bytes requested", ret);
     verify(in).read(position, buf, bufOffset, totalLen);
     verifyNoMoreInteractions(in);
@@ -69,11 +151,11 @@ public class TestHFileBlockPositionalRead {
     int extraLen = 0;
     int totalLen = necessaryLen + extraLen;
     byte[] buf = new byte[totalLen];
+    ByteBuff bb = new SingleByteBuff(ByteBuffer.wrap(buf, 0, totalLen));
     FSDataInputStream in = mock(FSDataInputStream.class);
     when(in.read(position, buf, bufOffset, totalLen)).thenReturn(5);
     when(in.read(5, buf, 5, 5)).thenReturn(5);
-    boolean ret = HFileBlock.positionalReadWithExtra(in, position, buf,
-        bufOffset, necessaryLen, extraLen);
+    boolean ret = BlockIOUtils.preadWithExtra(bb, in, position, necessaryLen, extraLen);
     assertFalse("Expect false return when no extra bytes requested", ret);
     verify(in).read(position, buf, bufOffset, totalLen);
     verify(in).read(5, buf, 5, 5);
@@ -88,10 +170,10 @@ public class TestHFileBlockPositionalRead {
     int extraLen = 5;
     int totalLen = necessaryLen + extraLen;
     byte[] buf = new byte[totalLen];
+    ByteBuff bb = new SingleByteBuff(ByteBuffer.wrap(buf, 0, totalLen));
     FSDataInputStream in = mock(FSDataInputStream.class);
     when(in.read(position, buf, bufOffset, totalLen)).thenReturn(totalLen);
-    boolean ret = HFileBlock.positionalReadWithExtra(in, position, buf,
-        bufOffset, necessaryLen, extraLen);
+    boolean ret = BlockIOUtils.preadWithExtra(bb, in, position, necessaryLen, extraLen);
     assertTrue("Expect true return when reading extra bytes succeeds", ret);
     verify(in).read(position, buf, bufOffset, totalLen);
     verifyNoMoreInteractions(in);
@@ -105,10 +187,10 @@ public class TestHFileBlockPositionalRead {
     int extraLen = 5;
     int totalLen = necessaryLen + extraLen;
     byte[] buf = new byte[totalLen];
+    ByteBuff bb = new SingleByteBuff(ByteBuffer.wrap(buf, 0, totalLen));
     FSDataInputStream in = mock(FSDataInputStream.class);
     when(in.read(position, buf, bufOffset, totalLen)).thenReturn(necessaryLen);
-    boolean ret = HFileBlock.positionalReadWithExtra(in, position, buf,
-        bufOffset, necessaryLen, extraLen);
+    boolean ret = BlockIOUtils.preadWithExtra(bb, in, position, necessaryLen, extraLen);
     assertFalse("Expect false return when reading extra bytes fails", ret);
     verify(in).read(position, buf, bufOffset, totalLen);
     verifyNoMoreInteractions(in);
@@ -123,11 +205,11 @@ public class TestHFileBlockPositionalRead {
     int extraLen = 5;
     int totalLen = necessaryLen + extraLen;
     byte[] buf = new byte[totalLen];
+    ByteBuff bb = new SingleByteBuff(ByteBuffer.wrap(buf, 0, totalLen));
     FSDataInputStream in = mock(FSDataInputStream.class);
     when(in.read(position, buf, bufOffset, totalLen)).thenReturn(5);
     when(in.read(5, buf, 5, 10)).thenReturn(10);
-    boolean ret = HFileBlock.positionalReadWithExtra(in, position, buf,
-        bufOffset, necessaryLen, extraLen);
+    boolean ret = BlockIOUtils.preadWithExtra(bb, in, position, necessaryLen, extraLen);
     assertTrue("Expect true return when reading extra bytes succeeds", ret);
     verify(in).read(position, buf, bufOffset, totalLen);
     verify(in).read(5, buf, 5, 10);
@@ -142,12 +224,12 @@ public class TestHFileBlockPositionalRead {
     int extraLen = 0;
     int totalLen = necessaryLen + extraLen;
     byte[] buf = new byte[totalLen];
+    ByteBuff bb = new SingleByteBuff(ByteBuffer.wrap(buf, 0, totalLen));
     FSDataInputStream in = mock(FSDataInputStream.class);
     when(in.read(position, buf, bufOffset, totalLen)).thenReturn(9);
     when(in.read(position, buf, bufOffset, totalLen)).thenReturn(-1);
     exception.expect(IOException.class);
     exception.expectMessage("EOF");
-    HFileBlock.positionalReadWithExtra(in, position, buf, bufOffset,
-        necessaryLen, extraLen);
+    BlockIOUtils.preadWithExtra(bb, in, position, necessaryLen, extraLen);
   }
 }
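The tests above all hinge on the boolean contract of preadWithExtra: it returns true only when the optional extra bytes were read as well. A hedged sketch of a caller interpreting that contract, again assuming it lives in the org.apache.hadoop.hbase.io.hfile package (sizes and names are placeholders):

    package org.apache.hadoop.hbase.io.hfile;   // BlockIOUtils is package-private

    import java.nio.ByteBuffer;

    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.hbase.nio.ByteBuff;
    import org.apache.hadoop.hbase.nio.SingleByteBuff;

    class PreadWithExtraSketch {
      // Positional read of the current block's bytes plus, if available, the next
      // block's header. hdrSize would come from the reader in real code.
      static boolean readBlockAndMaybeNextHeader(FSDataInputStream in, long offset)
          throws Exception {
        int necessary = 33, hdrSize = 33;
        ByteBuff bb = new SingleByteBuff(ByteBuffer.allocate(necessary + hdrSize));
        // true only when the extra hdrSize bytes were read as well.
        return BlockIOUtils.preadWithExtra(bb, in, offset, necessary, hdrSize);
      }
    }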
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java
index e93b61e..a4135d7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java
@@ -398,23 +398,25 @@ public class TestChecksum {
       return b;
     }
 
+
     @Override
-    protected int readAtOffset(FSDataInputStream istream, byte [] dest, int destOffset, int size,
+    protected boolean readAtOffset(FSDataInputStream istream, ByteBuff dest, int size,
         boolean peekIntoNextBlock, long fileOffset, boolean pread) throws IOException {
-      int returnValue = super.readAtOffset(istream, dest, destOffset, size, peekIntoNextBlock,
-          fileOffset, pread);
+      int destOffset = dest.position();
+      boolean returnValue =
+          super.readAtOffset(istream, dest, size, peekIntoNextBlock, fileOffset, pread);
       if (!corruptDataStream) {
         return returnValue;
       }
       // Corrupt 3rd character of block magic of next block's header.
       if (peekIntoNextBlock) {
-        dest[destOffset + size + 3] = 0b00000000;
+        dest.put(destOffset + size + 3, (byte) 0b00000000);
       }
       // We might be reading this block's header too, corrupt it.
-      dest[destOffset + 1] = 0b00000000;
+      dest.put(destOffset + 1, (byte) 0b00000000);
       // Corrupt non header data
       if (size > hdrSize) {
-        dest[destOffset + hdrSize + 1] = 0b00000000;
+        dest.put(destOffset + hdrSize + 1, (byte) 0b00000000);
       }
       return returnValue;
     }
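Since readAtOffset now fills a ByteBuff instead of a byte[], the corrupting reader in this test flips bytes with absolute-index puts rather than array indexing. A tiny sketch of that access pattern (class and method names are made up):

    import org.apache.hadoop.hbase.nio.ByteBuff;

    class CorruptByteSketch {
      // Overwrite one byte at an absolute index without moving the buffer position.
      static void corruptOneByte(ByteBuff dest, int index) {
        dest.put(index, (byte) 0);
      }
    }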


[hbase] 17/24: HBASE-21990 puppycrawl checkstyle dtds 404... moved to sourceforge ADDENDUM -- dtds moved location. See https://github.com/checkstyle/checkstyle/issues/6478

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit b1545c3fd0bc2a60195b7827910fb9c41c23049d
Author: stack <st...@apache.org>
AuthorDate: Fri Mar 8 08:49:06 2019 -0800

    HBASE-21990 puppycrawl checkstyle dtds 404... moved to sourceforge
    ADDENDUM -- dtds moved location. See
    https://github.com/checkstyle/checkstyle/issues/6478
---
 .../src/main/resources/hbase/checkstyle-suppressions.xml           | 7 -------
 hbase-checkstyle/src/main/resources/hbase/checkstyle.xml           | 7 ++++---
 2 files changed, 4 insertions(+), 10 deletions(-)

diff --git a/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml b/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml
index 1274426..6e74311 100644
--- a/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml
+++ b/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml
@@ -3,13 +3,6 @@
     "-//Checkstyle//DTD SuppressionFilter Configuration 1.2//EN"
     "https://checkstyle.org/dtds/suppressions_1_2.dtd">
 <!--
-  TODO Update to use the message suppression filter once we can update
-  to checkstyle 8.6+
-<!DOCTYPE suppressions PUBLIC
-    "-//Checkstyle//DTD SuppressionFilter Configuration 1.2//EN"
-    "https://checkstyle.org/dtds/suppressions_1_2.dtd">
--->
-  <!--
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
diff --git a/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml b/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml
index 746bd11..2001ae7 100644
--- a/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml
+++ b/hbase-checkstyle/src/main/resources/hbase/checkstyle.xml
@@ -1,7 +1,8 @@
 <?xml version="1.0"?>
-<!DOCTYPE module PUBLIC "-//Puppy Crawl//DTD Check Configuration 1.3//EN"
-  "http://checkstyle.sourceforge.net/dtds/configuration_1_3.dtd">
-  <!--
+<!DOCTYPE module PUBLIC
+  "-//Checkstyle//DTD Checkstyle Configuration 1.3//EN"
+  "https://checkstyle.org/dtds/configuration_1_3.dtd" >
+<!--
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file


[hbase] 10/24: HBASE-21874 Bucket cache on Persistent memory

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 9dc522b88166e25076c3a064150c98a062ae4ffb
Author: ramkrishna <ra...@apache.org>
AuthorDate: Wed Mar 6 22:09:51 2019 +0530

    HBASE-21874 Bucket cache on Persistent memory
    
    Signed-off-by: Anoop John <an...@gmail.com>
    Signed-off-by: Sean Busbey <bu...@apache.org>
    (cherry picked from commit 763202d48eaf8163f26840ea206a63b125aa3770)
---
 hbase-common/src/main/resources/hbase-default.xml  |  6 +-
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 10 ++-
 .../hfile/bucket/ExclusiveMemoryMmapIOEngine.java  | 50 +++++++++++++++
 .../{FileMmapEngine.java => FileMmapIOEngine.java} | 73 ++++++++++------------
 .../io/hfile/bucket/SharedMemoryMmapIOEngine.java  | 64 +++++++++++++++++++
 ...ine.java => TestExclusiveMemoryMmapEngine.java} |  8 +--
 src/main/asciidoc/_chapters/architecture.adoc      |  2 +
 src/main/asciidoc/_chapters/hbase-default.adoc     | 11 ++--
 8 files changed, 174 insertions(+), 50 deletions(-)

diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index ad6f645..c8ab48a 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -925,8 +925,10 @@ possible configurations would overwhelm and obscure the important.
     <name>hbase.bucketcache.ioengine</name>
     <value></value>
     <description>Where to store the contents of the bucketcache. One of: offheap,
-    file, files or mmap. If a file or files, set it to file(s):PATH_TO_FILE.
-    mmap means the content will be in an mmaped file. Use mmap:PATH_TO_FILE.
+    file, files, mmap or pmem. If a file or files, set it to file(s):PATH_TO_FILE.
+    mmap means the content will be in an mmaped file. Use mmap:PATH_TO_FILE. 'pmem'
+    is bucket cache over a file on the persistent memory device.
+    Use pmem:PATH_TO_FILE.
     See http://hbase.apache.org/book.html#offheap.blockcache for more information.
     </description>
   </property>
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 5a21bad..470c4f7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -379,7 +379,15 @@ public class BucketCache implements BlockCache, HeapSize {
     } else if (ioEngineName.startsWith("offheap")) {
       return new ByteBufferIOEngine(capacity);
     } else if (ioEngineName.startsWith("mmap:")) {
-      return new FileMmapEngine(ioEngineName.substring(5), capacity);
+      return new ExclusiveMemoryMmapIOEngine(ioEngineName.substring(5), capacity);
+    } else if (ioEngineName.startsWith("pmem:")) {
+      // This mode of bucket cache creates an IOEngine over a file on the persistent memory
+      // device. Since the persistent memory device has its own address space the contents
+      // mapped to this address space does not get swapped out like in the case of mmapping
+      // on to DRAM. Hence the cells created out of the hfile blocks in the pmem bucket cache
+      // can be directly referred to without having to copy them onheap. Once the RPC is done,
+      // the blocks can be returned back as in case of ByteBufferIOEngine.
+      return new SharedMemoryMmapIOEngine(ioEngineName.substring(5), capacity);
     } else {
       throw new IllegalArgumentException(
           "Don't understand io engine name for cache- prefix with file:, files:, mmap: or offheap");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java
new file mode 100644
index 0000000..8b024f0
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ExclusiveMemoryMmapIOEngine.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile.bucket;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.hbase.io.hfile.Cacheable;
+import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
+import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
+import org.apache.hadoop.hbase.nio.SingleByteBuff;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * IO engine that stores data to a file on the local block device using memory mapping
+ * mechanism
+ */
+@InterfaceAudience.Private
+public class ExclusiveMemoryMmapIOEngine extends FileMmapIOEngine {
+  static final Logger LOG = LoggerFactory.getLogger(ExclusiveMemoryMmapIOEngine.class);
+
+  public ExclusiveMemoryMmapIOEngine(String filePath, long capacity) throws IOException {
+    super(filePath, capacity);
+  }
+
+  @Override
+  public Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer)
+      throws IOException {
+    byte[] dst = new byte[length];
+    bufferArray.getMultiple(offset, length, dst);
+    return deserializer.deserialize(new SingleByteBuff(ByteBuffer.wrap(dst)), true,
+      MemoryType.EXCLUSIVE);
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java
similarity index 66%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapEngine.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java
index 82f42cd..9580efe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileMmapIOEngine.java
@@ -1,20 +1,19 @@
 /**
- * Copyright The Apache Software Foundation
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
  *
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
 package org.apache.hadoop.hbase.io.hfile.bucket;
 
@@ -24,33 +23,31 @@ import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.io.hfile.Cacheable;
-import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
 import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
 import org.apache.hadoop.hbase.nio.ByteBuff;
-import org.apache.hadoop.hbase.nio.SingleByteBuff;
 import org.apache.hadoop.hbase.util.ByteBufferAllocator;
 import org.apache.hadoop.hbase.util.ByteBufferArray;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
- * IO engine that stores data to a file on the local file system using memory mapping
+ * IO engine that stores data to a file on the specified file system using memory mapping
  * mechanism
  */
 @InterfaceAudience.Private
-public class FileMmapEngine implements IOEngine {
-  static final Logger LOG = LoggerFactory.getLogger(FileMmapEngine.class);
+public abstract class FileMmapIOEngine implements IOEngine {
+  static final Logger LOG = LoggerFactory.getLogger(FileMmapIOEngine.class);
 
-  private final String path;
-  private long size;
-  private ByteBufferArray bufferArray;
+  protected final String path;
+  protected long size;
+  protected ByteBufferArray bufferArray;
   private final FileChannel fileChannel;
   private RandomAccessFile raf = null;
 
-  public FileMmapEngine(String filePath, long capacity) throws IOException {
+  public FileMmapIOEngine(String filePath, long capacity) throws IOException {
     this.path = filePath;
     this.size = capacity;
     long fileSize = 0;
@@ -64,13 +61,15 @@ public class FileMmapEngine implements IOEngine {
       LOG.error("Can't create bucket cache file " + filePath, fex);
       throw fex;
     } catch (IOException ioex) {
-      LOG.error("Can't extend bucket cache file; insufficient space for "
-          + StringUtils.byteDesc(fileSize), ioex);
+      LOG.error(
+        "Can't extend bucket cache file; insufficient space for " + StringUtils.byteDesc(fileSize),
+        ioex);
       shutdown();
       throw ioex;
     }
     ByteBufferAllocator allocator = new ByteBufferAllocator() {
       AtomicInteger pos = new AtomicInteger(0);
+
       @Override
       public ByteBuffer allocate(long size) throws IOException {
         ByteBuffer buffer = fileChannel.map(java.nio.channels.FileChannel.MapMode.READ_WRITE,
@@ -87,8 +86,8 @@ public class FileMmapEngine implements IOEngine {
 
   @Override
   public String toString() {
-    return "ioengine=" + this.getClass().getSimpleName() + ", path=" + this.path +
-      ", size=" + String.format("%,d", this.size);
+    return "ioengine=" + this.getClass().getSimpleName() + ", path=" + this.path + ", size="
+        + String.format("%,d", this.size);
   }
 
   /**
@@ -97,17 +96,13 @@ public class FileMmapEngine implements IOEngine {
    */
   @Override
   public boolean isPersistent() {
+    // TODO : HBASE-21981 needed for persistence to really work
     return true;
   }
 
   @Override
-  public Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer)
-      throws IOException {
-    byte[] dst = new byte[length];
-    bufferArray.getMultiple(offset, length, dst);
-    return deserializer.deserialize(new SingleByteBuff(ByteBuffer.wrap(dst)), true,
-      MemoryType.EXCLUSIVE);
-  }
+  public abstract Cacheable read(long offset, int length,
+      CacheableDeserializer<Cacheable> deserializer) throws IOException;
 
   /**
    * Transfers data from the given byte buffer to file
@@ -119,7 +114,7 @@ public class FileMmapEngine implements IOEngine {
   public void write(ByteBuffer srcBuffer, long offset) throws IOException {
     assert srcBuffer.hasArray();
     bufferArray.putMultiple(offset, srcBuffer.remaining(), srcBuffer.array(),
-        srcBuffer.arrayOffset());
+      srcBuffer.arrayOffset());
   }
 
   @Override
@@ -127,9 +122,9 @@ public class FileMmapEngine implements IOEngine {
     // This singleByteBuff can be considered to be array backed
     assert srcBuffer.hasArray();
     bufferArray.putMultiple(offset, srcBuffer.remaining(), srcBuffer.array(),
-        srcBuffer.arrayOffset());
-
+      srcBuffer.arrayOffset());
   }
+
   /**
    * Sync the data to file after writing
    * @throws IOException
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java
new file mode 100644
index 0000000..b6a7a57
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/SharedMemoryMmapIOEngine.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile.bucket;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.io.hfile.Cacheable;
+import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
+import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * IO engine that stores data in pmem devices such as DCPMM. This engine also mmaps the file from
+ * the given path. But note that this path has to be a path on the pmem device so that when mmapped
+ * the file's address is mapped to the Pmem's address space and not in the DRAM. Since this address
+ * space is exclusive for the Pmem device there is no swapping out of the mmapped contents that
+ * generally happens when DRAM's free space is not enough to hold the specified file's mmapped
+ * contents. This gives us the option of using the {@code MemoryType#SHARED} type when serving the
+ * data from this pmem address space. We need not copy the blocks to the onheap space as we need to
+ * do for the case of {@code ExclusiveMemoryMmapIOEngine}.
+ */
+@InterfaceAudience.Private
+public class SharedMemoryMmapIOEngine extends FileMmapIOEngine {
+
+  // TODO this will support only one path over Pmem. To make use of multiple Pmem devices mounted,
+  // we need to support multiple paths like files IOEngine. Support later.
+  public SharedMemoryMmapIOEngine(String filePath, long capacity) throws IOException {
+    super(filePath, capacity);
+  }
+
+  @Override
+  public boolean usesSharedMemory() {
+    return true;
+  }
+
+  @Override
+  public Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer)
+      throws IOException {
+    ByteBuff dstBuffer = bufferArray.asSubByteBuff(offset, length);
+    // Here the buffer that is created directly refers to the buffer in the actual buckets.
+    // When any cell is referring to the blocks created out of these buckets then it means that
+    // those cells are referring to a shared memory area which if evicted by the BucketCache would
+    // lead to corruption of results. Hence we set the type of the buffer as SHARED_MEMORY
+    // so that the readers using this block are aware of this fact and do the necessary action
+    // to prevent eviction till the results are either consumed or copied
+    return deserializer.deserialize(dstBuffer, true, MemoryType.SHARED);
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileMmapEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestExclusiveMemoryMmapEngine.java
similarity index 90%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileMmapEngine.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestExclusiveMemoryMmapEngine.java
index 2748d80..d0d8c8a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileMmapEngine.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestExclusiveMemoryMmapEngine.java
@@ -32,21 +32,21 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 /**
- * Basic test for {@link FileMmapEngine}
+ * Basic test for {@link ExclusiveMemoryMmapIOEngine}
  */
 @Category({IOTests.class, SmallTests.class})
-public class TestFileMmapEngine {
+public class TestExclusiveMemoryMmapEngine {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestFileMmapEngine.class);
+      HBaseClassTestRule.forClass(TestExclusiveMemoryMmapEngine.class);
 
   @Test
   public void testFileMmapEngine() throws IOException {
     int size = 2 * 1024 * 1024; // 2 MB
     String filePath = "testFileMmapEngine";
     try {
-      FileMmapEngine fileMmapEngine = new FileMmapEngine(filePath, size);
+      ExclusiveMemoryMmapIOEngine fileMmapEngine = new ExclusiveMemoryMmapIOEngine(filePath, size);
       for (int i = 0; i < 50; i++) {
         int len = (int) Math.floor(Math.random() * 100);
         long offset = (long) Math.floor(Math.random() * size % (size - len));
diff --git a/src/main/asciidoc/_chapters/architecture.adoc b/src/main/asciidoc/_chapters/architecture.adoc
index c250aa8..9a5eba9 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -733,6 +733,8 @@ See configurations, sizings, current usage, time-in-the-cache, and even detail o
 
 `LruBlockCache` is the original implementation, and is entirely within the Java heap.
 `BucketCache` is optional and mainly intended for keeping block cache data off-heap, although `BucketCache` can also be a file-backed cache.
+ In file-backed we can either use it in the file mode or the mmaped mode.
+ We also have pmem mode where the bucket cache resides on the persistent memory device.
 
 When you enable BucketCache, you are enabling a two tier caching system. We used to describe the
 tiers as "L1" and "L2" but have deprecated this terminology as of hbase-2.0.0. The "L1" cache referred to an
diff --git a/src/main/asciidoc/_chapters/hbase-default.adoc b/src/main/asciidoc/_chapters/hbase-default.adoc
index f809f28..ccd5f6f 100644
--- a/src/main/asciidoc/_chapters/hbase-default.adoc
+++ b/src/main/asciidoc/_chapters/hbase-default.adoc
@@ -1197,10 +1197,13 @@ When the size of a leaf-level, intermediate-level, or root-level
 *`hbase.bucketcache.ioengine`*::
 +
 .Description
-Where to store the contents of the bucketcache. One of: onheap,
-      offheap, or file. If a file, set it to file:PATH_TO_FILE.
-      See https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.html
-      for more information.
+Where to store the contents of the bucketcache. One of: offheap,
+    file, files, mmap or pmem. If a file or files, set it to file(s):PATH_TO_FILE.
+    mmap means the content will be in an mmaped file. Use mmap:PATH_TO_FILE.
+    'pmem' is bucket cache over a file on the persistent memory device.
+    Use pmem:PATH_TO_FILE.
+    See https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.html
+    for more information.
 
 +
 .Default

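For anyone wanting to try the new engine, the change above is driven entirely by the hbase.bucketcache.ioengine value: a pmem: prefix selects the new SharedMemoryMmapIOEngine, while mmap: keeps the exclusive-memory behaviour of the old FileMmapEngine. A minimal configuration sketch follows; the /mnt/pmem0/bucketcache path and the 16384 MB capacity are illustrative assumptions only (the path must sit on a DAX-mounted persistent-memory filesystem), and in practice the same keys would normally go into hbase-site.xml on each RegionServer rather than be set in code.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class PmemBucketCacheConfigSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Select the pmem engine; everything after "pmem:" is the backing file on the
      // persistent-memory device (hypothetical mount point, adjust to the actual DAX mount).
      conf.set("hbase.bucketcache.ioengine", "pmem:/mnt/pmem0/bucketcache");
      // Bucket cache capacity (values greater than 1 are read as megabytes); 16 GB here
      // is purely illustrative.
      conf.set("hbase.bucketcache.size", "16384");
      System.out.println("ioengine = " + conf.get("hbase.bucketcache.ioengine"));
    }
  }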

[hbase] 16/24: HBASE-21416 - fix TestRegionInfoDisplay flaky test

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 31902a098c5a27c59d67fd41c39042db46e15d49
Author: Norbert Kalmar <nk...@cloudera.com>
AuthorDate: Thu Mar 7 11:56:10 2019 +0100

    HBASE-21416 - fix TestRegionInfoDisplay flaky test
---
 .../java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java   | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
index 1a6f2f7..0800acd 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
@@ -68,12 +68,13 @@ public class TestRegionInfoDisplay {
     RegionState state = RegionState.createForTesting(convert(ri), RegionState.State.OPEN);
     String descriptiveNameForDisplay =
         RegionInfoDisplay.getDescriptiveNameFromRegionStateForDisplay(state, conf);
-    checkDescriptiveNameEquality(descriptiveNameForDisplay,state.toDescriptiveString(), startKey);
+    String originalDescriptive = state.toDescriptiveString();
+    checkDescriptiveNameEquality(descriptiveNameForDisplay, originalDescriptive, startKey);
 
     conf.setBoolean("hbase.display.keys", true);
     Assert.assertArrayEquals(endKey, RegionInfoDisplay.getEndKeyForDisplay(ri, conf));
     Assert.assertArrayEquals(startKey, RegionInfoDisplay.getStartKeyForDisplay(ri, conf));
-    Assert.assertEquals(state.toDescriptiveString(),
+    Assert.assertEquals(originalDescriptive,
         RegionInfoDisplay.getDescriptiveNameFromRegionStateForDisplay(state, conf));
   }
 

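The fix above boils down to capturing state.toDescriptiveString() once and comparing every display rendering against that single snapshot; the descriptive string likely embeds time-dependent detail (for example an age computed from the current clock), so two independent calls that straddle a tick can disagree. A minimal sketch of the capture-once pattern, with currentStateLine() as a hypothetical stand-in for toDescriptiveString() rather than an HBase API:

  public class CaptureOnceSketch {
    // Hypothetical stand-in: the rendered line embeds something time-dependent.
    static String currentStateLine() {
      return "state=OPEN, ts=" + System.currentTimeMillis();
    }

    public static void main(String[] args) {
      // Flaky shape: two independent calls may observe different timestamps.
      boolean mayDiffer = currentStateLine().equals(currentStateLine());

      // Stable shape: capture one snapshot and reuse it in every comparison.
      String expected = currentStateLine();
      boolean alwaysEqual = expected.equals(expected);

      System.out.println("two calls equal? " + mayDiffer + ", snapshot equal? " + alwaysEqual);
    }
  }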

[hbase] 05/24: SE-22006 Fix branch-2.1 findbugs warning; causes nightly show as failed.

Posted by op...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

openinx pushed a commit to branch HBASE-21879
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit f935a75f227f0357f3c2bd8bf10033420c703484
Author: stack <st...@apache.org>
AuthorDate: Wed Mar 6 15:59:06 2019 -0800

    SE-22006 Fix branch-2.1 findbugs warning; causes nightly show as failed.
---
 .../java/org/apache/hadoop/hbase/regionserver/HRegionServer.java     | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index f983882..e407285 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1613,9 +1613,8 @@ public class HRegionServer extends HasThread implements
           MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT);
       int chunkSize = conf.getInt(MemStoreLAB.CHUNK_SIZE_KEY, MemStoreLAB.CHUNK_SIZE_DEFAULT);
       // init the chunkCreator
-      ChunkCreator chunkCreator =
-          ChunkCreator.initialize(chunkSize, offheap, globalMemStoreSize, poolSizePercentage,
-      initialCountPercentage, this.hMemManager);
+      ChunkCreator.initialize(chunkSize, offheap, globalMemStoreSize, poolSizePercentage,
+        initialCountPercentage, this.hMemManager);
     }
   }
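
The warning being silenced here has the classic dead-local-store shape: ChunkCreator.initialize(...) was called for its side effect, but its return value was assigned to a local that was never read, which findbugs flags (most likely under its dead-local-store detector) and which the nightly job then reports as a failure. A minimal sketch of the pattern and the fix, with Initializer as a hypothetical stand-in rather than the HBase ChunkCreator API:

  public class DeadStoreSketch {
    // Hypothetical stand-in for a class initialised once for its side effect.
    static final class Initializer {
      private static Initializer instance;
      static Initializer initialize() {
        instance = new Initializer();
        return instance;   // callers usually don't need the returned handle
      }
    }

    void before() {
      Initializer unused = Initializer.initialize(); // dead store: written, never read
    }

    void after() {
      Initializer.initialize();                      // keep the side effect, drop the local
    }
  }

The HBase change keeps the second shape: the initialising call remains, the unused chunkCreator local disappears, and the findbugs report clears.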