Posted to commits@hbase.apache.org by zg...@apache.org on 2020/08/21 09:20:56 UTC

[hbase] branch branch-2.2 updated: HBASE-24904 Speed up some unit tests (#2276)

This is an automated email from the ASF dual-hosted git repository.

zghao pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
     new a4368a7  HBASE-24904 Speed up some unit tests (#2276)
a4368a7 is described below

commit a4368a7b455848dab9d65631419958b3a7d888f3
Author: Guanghao Zhang <zg...@apache.org>
AuthorDate: Fri Aug 21 17:20:29 2020 +0800

    HBASE-24904 Speed up some unit tests (#2276)
    
    Split TestAsyncTableAdminApi/TestAdminShell/TestLoadIncrementalHFiles
    
    Reduce region numbers in TestSnapshotTemporaryDirectoryWithRegionReplicas/TestRegionReplicaFailover/TestSCP*
    
    Signed-off-by: meiyi <my...@gmail.com>
    Signed-off-by: stack <st...@apache.org>
---
 .../hbase/client/TestAsyncTableAdminApi.java       |  99 +------
 .../hbase/client/TestAsyncTableAdminApi4.java      | 143 ++++++++++
 .../client/TestSnapshotTemporaryDirectory.java     |  52 ----
 .../hadoop/hbase/master/procedure/TestSCPBase.java |   8 +-
 .../regionserver/TestRegionReplicaFailover.java    |   2 +-
 .../hbase/tool/TestLoadIncrementalHFiles.java      | 294 +-------------------
 .../hbase/tool/TestLoadIncrementalHFiles2.java     | 103 +++++++
 .../hbase/tool/TestLoadIncrementalHFilesBase.java  | 284 ++++++++++++++++++++
 .../tool/TestSecureLoadIncrementalHFiles.java      |  27 +-
 ....java => TestSecureLoadIncrementalHFiles2.java} |  31 +--
 .../hadoop/hbase/client/TestAdminShell3.java       |  42 +++
 hbase-shell/src/test/ruby/hbase/admin3_test.rb     | 298 +++++++++++++++++++++
 hbase-shell/src/test/ruby/hbase/admin_test.rb      | 269 -------------------
 13 files changed, 889 insertions(+), 763 deletions(-)
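
The pattern behind every split in this commit is the same: the expensive @BeforeClass/@AfterClass mini-cluster setup moves into a shared base class, and each split-out class keeps a subset of the @Test methods plus its own HBaseClassTestRule, which enforces the per-class timeout. A minimal sketch of that shape follows; FooTestBase and FooTest1 are illustrative names, not classes from this commit.

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;

// FooTestBase.java -- shared setup, no tests of its own
public class FooTestBase {
  protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    UTIL.startMiniCluster(); // expensive setup, paid once per split class
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    UTIL.shutdownMiniCluster();
  }
}

// FooTest1.java -- one slice of the original suite
public class FooTest1 extends FooTestBase {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(FooTest1.class); // per-class timeout rule

  @Test
  public void testOneSliceOfTheOriginalSuite() throws Exception {
    // a subset of the original test methods lives here
  }
}

Each slice pays the mini-cluster startup cost again, but the slices run in parallel on CI and each stays under the ten minute LargeTests budget.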

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
index 42d0118..573ff6b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -29,14 +30,13 @@ import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import java.util.concurrent.CompletionException;
+
 import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -52,6 +52,7 @@ import org.junit.runners.Parameterized;
  * @see TestAsyncTableAdminApi2 This test and it used to be joined; together they were taking
  *     longer than our ten minute timeout, so they were split.
  * @see TestAsyncTableAdminApi3 Another split out from this class so each runs under ten minutes.
+ * @see TestAsyncTableAdminApi4 Another split out from this class so each runs under ten minutes.
  */
 @RunWith(Parameterized.class)
 @Category({ LargeTests.class, ClientTests.class })
@@ -361,96 +362,4 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
       assertEquals(1, TEST_UTIL.getHBaseCluster().getRegions(tableName).size());
     }
   }
-
-  @Test
-  public void testCloneTableSchema() throws Exception {
-    final TableName newTableName = TableName.valueOf(tableName.getNameAsString() + "_new");
-    testCloneTableSchema(tableName, newTableName, false);
-  }
-
-  @Test
-  public void testCloneTableSchemaPreservingSplits() throws Exception {
-    final TableName newTableName = TableName.valueOf(tableName.getNameAsString() + "_new");
-    testCloneTableSchema(tableName, newTableName, true);
-  }
-
-  private void testCloneTableSchema(final TableName tableName,
-      final TableName newTableName, boolean preserveSplits) throws Exception {
-    byte[][] splitKeys = new byte[2][];
-    splitKeys[0] = Bytes.toBytes(4);
-    splitKeys[1] = Bytes.toBytes(8);
-    int NUM_FAMILYS = 2;
-    int NUM_REGIONS = 3;
-    int BLOCK_SIZE = 1024;
-    int TTL = 86400;
-    boolean BLOCK_CACHE = false;
-
-    // Create the table
-    TableDescriptor tableDesc = TableDescriptorBuilder
-        .newBuilder(tableName)
-        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0))
-        .setColumnFamily(ColumnFamilyDescriptorBuilder
-            .newBuilder(FAMILY_1)
-            .setBlocksize(BLOCK_SIZE)
-            .setBlockCacheEnabled(BLOCK_CACHE)
-            .setTimeToLive(TTL)
-            .build()).build();
-    admin.createTable(tableDesc, splitKeys).join();
-
-    assertEquals(NUM_REGIONS, TEST_UTIL.getHBaseCluster().getRegions(tableName).size());
-    assertTrue("Table should be created with splitKyes + 1 rows in META",
-        admin.isTableAvailable(tableName, splitKeys).get());
-
-    // Clone & Verify
-    admin.cloneTableSchema(tableName, newTableName, preserveSplits).join();
-    TableDescriptor newTableDesc = admin.getDescriptor(newTableName).get();
-
-    assertEquals(NUM_FAMILYS, newTableDesc.getColumnFamilyCount());
-    assertEquals(BLOCK_SIZE, newTableDesc.getColumnFamily(FAMILY_1).getBlocksize());
-    assertEquals(BLOCK_CACHE, newTableDesc.getColumnFamily(FAMILY_1).isBlockCacheEnabled());
-    assertEquals(TTL, newTableDesc.getColumnFamily(FAMILY_1).getTimeToLive());
-    TEST_UTIL.verifyTableDescriptorIgnoreTableName(tableDesc, newTableDesc);
-
-    if (preserveSplits) {
-      assertEquals(NUM_REGIONS, TEST_UTIL.getHBaseCluster().getRegions(newTableName).size());
-      assertTrue("New table should be created with splitKyes + 1 rows in META",
-          admin.isTableAvailable(newTableName, splitKeys).get());
-    } else {
-      assertEquals(1, TEST_UTIL.getHBaseCluster().getRegions(newTableName).size());
-    }
-  }
-
-  @Test
-  public void testCloneTableSchemaWithNonExistentSourceTable() throws Exception {
-    final TableName newTableName = TableName.valueOf(tableName.getNameAsString() + "_new");
-    // test for non-existent source table
-    try {
-      admin.cloneTableSchema(tableName, newTableName, false).join();
-      fail("Should have failed when source table doesn't exist.");
-    } catch (CompletionException e) {
-      assertTrue(e.getCause() instanceof TableNotFoundException);
-    }
-  }
-
-  @Test
-  public void testCloneTableSchemaWithExistentDestinationTable() throws Exception {
-    final TableName newTableName = TableName.valueOf(tableName.getNameAsString() + "_new");
-    byte[] FAMILY_0 = Bytes.toBytes("cf0");
-    TEST_UTIL.createTable(tableName, FAMILY_0);
-    TEST_UTIL.createTable(newTableName, FAMILY_0);
-    // test for existent destination table
-    try {
-      admin.cloneTableSchema(tableName, newTableName, false).join();
-      fail("Should have failed when destination table exists.");
-    } catch (CompletionException e) {
-      assertTrue(e.getCause() instanceof TableExistsException);
-    }
-  }
-
-  @Test
-  public void testIsTableAvailableWithInexistantTable() throws Exception {
-    final TableName newTableName = TableName.valueOf(tableName.getNameAsString() + "_new");
-    // test for inexistant table
-    assertFalse(admin.isTableAvailable(newTableName).get());
-  }
-}
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi4.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi4.java
new file mode 100644
index 0000000..7daf049
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi4.java
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.concurrent.CompletionException;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+/**
+ * Class to test asynchronous table admin operations
+ * @see TestAsyncTableAdminApi Split out from that class so each runs under ten minutes.
+ */
+@RunWith(Parameterized.class)
+@Category({ LargeTests.class, ClientTests.class })
+public class TestAsyncTableAdminApi4 extends TestAsyncAdminBase {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestAsyncTableAdminApi4.class);
+
+  @Test
+  public void testCloneTableSchema() throws Exception {
+    final TableName newTableName = TableName.valueOf(tableName.getNameAsString() + "_new");
+    testCloneTableSchema(tableName, newTableName, false);
+  }
+
+  @Test
+  public void testCloneTableSchemaPreservingSplits() throws Exception {
+    final TableName newTableName = TableName.valueOf(tableName.getNameAsString() + "_new");
+    testCloneTableSchema(tableName, newTableName, true);
+  }
+
+  private void testCloneTableSchema(final TableName tableName,
+      final TableName newTableName, boolean preserveSplits) throws Exception {
+    byte[][] splitKeys = new byte[2][];
+    splitKeys[0] = Bytes.toBytes(4);
+    splitKeys[1] = Bytes.toBytes(8);
+    int NUM_FAMILYS = 2;
+    int NUM_REGIONS = 3;
+    int BLOCK_SIZE = 1024;
+    int TTL = 86400;
+    boolean BLOCK_CACHE = false;
+
+    // Create the table
+    TableDescriptor tableDesc = TableDescriptorBuilder
+        .newBuilder(tableName)
+        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0))
+        .setColumnFamily(ColumnFamilyDescriptorBuilder
+            .newBuilder(FAMILY_1)
+            .setBlocksize(BLOCK_SIZE)
+            .setBlockCacheEnabled(BLOCK_CACHE)
+            .setTimeToLive(TTL)
+            .build()).build();
+    admin.createTable(tableDesc, splitKeys).join();
+
+    assertEquals(NUM_REGIONS, TEST_UTIL.getHBaseCluster().getRegions(tableName).size());
+    assertTrue("Table should be created with splitKyes + 1 rows in META",
+        admin.isTableAvailable(tableName, splitKeys).get());
+
+    // Clone & Verify
+    admin.cloneTableSchema(tableName, newTableName, preserveSplits).join();
+    TableDescriptor newTableDesc = admin.getDescriptor(newTableName).get();
+
+    assertEquals(NUM_FAMILYS, newTableDesc.getColumnFamilyCount());
+    assertEquals(BLOCK_SIZE, newTableDesc.getColumnFamily(FAMILY_1).getBlocksize());
+    assertEquals(BLOCK_CACHE, newTableDesc.getColumnFamily(FAMILY_1).isBlockCacheEnabled());
+    assertEquals(TTL, newTableDesc.getColumnFamily(FAMILY_1).getTimeToLive());
+    TEST_UTIL.verifyTableDescriptorIgnoreTableName(tableDesc, newTableDesc);
+
+    if (preserveSplits) {
+      assertEquals(NUM_REGIONS, TEST_UTIL.getHBaseCluster().getRegions(newTableName).size());
+      assertTrue("New table should be created with splitKyes + 1 rows in META",
+          admin.isTableAvailable(newTableName, splitKeys).get());
+    } else {
+      assertEquals(1, TEST_UTIL.getHBaseCluster().getRegions(newTableName).size());
+    }
+  }
+
+  @Test
+  public void testCloneTableSchemaWithNonExistentSourceTable() throws Exception {
+    final TableName newTableName = TableName.valueOf(tableName.getNameAsString() + "_new");
+    // test for non-existent source table
+    try {
+      admin.cloneTableSchema(tableName, newTableName, false).join();
+      fail("Should have failed when source table doesn't exist.");
+    } catch (CompletionException e) {
+      assertTrue(e.getCause() instanceof TableNotFoundException);
+    }
+  }
+
+  @Test
+  public void testCloneTableSchemaWithExistentDestinationTable() throws Exception {
+    final TableName newTableName = TableName.valueOf(tableName.getNameAsString() + "_new");
+    byte[] FAMILY_0 = Bytes.toBytes("cf0");
+    TEST_UTIL.createTable(tableName, FAMILY_0);
+    TEST_UTIL.createTable(newTableName, FAMILY_0);
+    // test for an existing destination table
+    try {
+      admin.cloneTableSchema(tableName, newTableName, false).join();
+      fail("Should have failed when destination table exists.");
+    } catch (CompletionException e) {
+      assertTrue(e.getCause() instanceof TableExistsException);
+    }
+  }
+
+  @Test
+  public void testIsTableAvailableWithInexistantTable() throws Exception {
+    final TableName newTableName = TableName.valueOf(tableName.getNameAsString() + "_new");
+    // test for a nonexistent table
+    assertFalse(admin.isTableAvailable(newTableName).get());
+  }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java
index 3cbf88d..7c584ac 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java
@@ -159,9 +159,7 @@ public class TestSnapshotTemporaryDirectory {
     TableName tableName = TableName.valueOf("testtb-" + tid);
     byte[] emptySnapshot = Bytes.toBytes("emptySnaptb-" + tid);
     byte[] snapshotName0 = Bytes.toBytes("snaptb0-" + tid);
-    byte[] snapshotName1 = Bytes.toBytes("snaptb1-" + tid);
     int snapshot0Rows;
-    int snapshot1Rows;
 
     // create Table and disable it
     SnapshotTestingUtils.createTable(UTIL, tableName, getNumReplicas(), TEST_FAM);
@@ -177,21 +175,9 @@ public class TestSnapshotTemporaryDirectory {
       snapshot0Rows = UTIL.countRows(table);
     }
     admin.disableTable(tableName);
-
     // take a snapshot
     takeSnapshot(tableName, Bytes.toString(snapshotName0), true);
 
-    // enable table and insert more data
-    admin.enableTable(tableName);
-    SnapshotTestingUtils.loadData(UTIL, tableName, 500, TEST_FAM);
-    try (Table table = UTIL.getConnection().getTable(tableName)) {
-      snapshot1Rows = UTIL.countRows(table);
-    }
-
-    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
-    admin.disableTable(tableName);
-    takeSnapshot(tableName, Bytes.toString(snapshotName1), true);
-
     // Restore from snapshot-0
     admin.restoreSnapshot(snapshotName0);
     admin.enableTable(tableName);
@@ -204,19 +190,6 @@ public class TestSnapshotTemporaryDirectory {
     admin.enableTable(tableName);
     SnapshotTestingUtils.verifyRowCount(UTIL, tableName, 0);
     SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
-
-    // Restore from snapshot-1
-    admin.disableTable(tableName);
-    admin.restoreSnapshot(snapshotName1);
-    admin.enableTable(tableName);
-    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
-    SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
-
-    // Restore from snapshot-1
-    UTIL.deleteTable(tableName);
-    admin.restoreSnapshot(snapshotName1);
-    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
-    SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
   }
 
   @Test
@@ -226,9 +199,7 @@ public class TestSnapshotTemporaryDirectory {
     TableName tableName = TableName.valueOf("testtb-" + tid);
     byte[] emptySnapshot = Bytes.toBytes("emptySnaptb-" + tid);
     byte[] snapshotName0 = Bytes.toBytes("snaptb0-" + tid);
-    byte[] snapshotName1 = Bytes.toBytes("snaptb1-" + tid);
     int snapshot0Rows;
-    int snapshot1Rows;
 
     // create Table
     SnapshotTestingUtils.createTable(UTIL, tableName, getNumReplicas(), TEST_FAM);
@@ -241,19 +212,9 @@ public class TestSnapshotTemporaryDirectory {
     try (Table table = UTIL.getConnection().getTable(tableName)) {
       snapshot0Rows = UTIL.countRows(table);
     }
-
     // take a snapshot
     takeSnapshot(tableName, Bytes.toString(snapshotName0), false);
 
-    // Insert more data
-    SnapshotTestingUtils.loadData(UTIL, tableName, 500, TEST_FAM);
-    try (Table table = UTIL.getConnection().getTable(tableName)) {
-      snapshot1Rows = UTIL.countRows(table);
-    }
-
-    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
-    takeSnapshot(tableName, Bytes.toString(snapshotName1), false);
-
     // Restore from snapshot-0
     admin.disableTable(tableName);
     admin.restoreSnapshot(snapshotName0);
@@ -267,19 +228,6 @@ public class TestSnapshotTemporaryDirectory {
     admin.enableTable(tableName);
     SnapshotTestingUtils.verifyRowCount(UTIL, tableName, 0);
     SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
-
-    // Restore from snapshot-1
-    admin.disableTable(tableName);
-    admin.restoreSnapshot(snapshotName1);
-    admin.enableTable(tableName);
-    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
-    SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
-
-    // Restore from snapshot-1
-    UTIL.deleteTable(tableName);
-    admin.restoreSnapshot(snapshotName1);
-    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
-    SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
   }
 
   /**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPBase.java
index aa5e4be..a37cac5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSCPBase.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.After;
 import org.junit.Before;
 import org.slf4j.Logger;
@@ -138,9 +139,10 @@ public class TestSCPBase {
   }
 
   protected Table createTable(final TableName tableName) throws IOException {
-    final Table t = this.util.createTable(tableName, HBaseTestingUtility.COLUMNS,
-      HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE, getRegionReplication());
-    return t;
+    int numRegions = 10;
+    byte[][] splitKeys = Bytes.split(Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), numRegions - 3);
+    return util
+        .createTable(tableName, HBaseTestingUtility.COLUMNS, splitKeys, getRegionReplication());
   }
 
   protected int getRegionReplication() {
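
Why numRegions - 3? Bytes.split(start, stop, n) returns n + 2 boundary keys (the n interior split points plus both endpoints), and createTable makes splitKeys.length + 1 regions, so asking for numRegions - 3 interior splits yields exactly numRegions regions. This mirrors the arithmetic used by the createTable(desc, startKey, endKey, numRegions) overload. A quick standalone check of that arithmetic (a sketch, not commit code):

import org.apache.hadoop.hbase.util.Bytes;

public class SplitKeyMath {
  public static void main(String[] args) {
    int numRegions = 10;
    // 7 interior splits + 2 endpoints => 9 split keys
    byte[][] splitKeys =
        Bytes.split(Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), numRegions - 3);
    System.out.println(splitKeys.length); // 9; createTable builds 9 + 1 = 10 regions
  }
}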
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java
index 8a2123e..205abf9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java
@@ -324,7 +324,7 @@ public class TestRegionReplicaFailover {
    */
   @Test
   public void testLotsOfRegionReplicas() throws IOException {
-    int numRegions = NB_SERVERS * 20;
+    int numRegions = NB_SERVERS * 5;
     int regionReplication = 10;
     String tableName = htd.getTableName().getNameAsString() + "2";
     htd = HTU.createTableDescriptor(tableName);
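
The saving in testLotsOfRegionReplicas is multiplicative: with regionReplication = 10, the cluster must open numRegions * 10 region replicas, so dropping numRegions from NB_SERVERS * 20 to NB_SERVERS * 5 cuts the replicas to open (and later fail over) from NB_SERVERS * 200 to NB_SERVERS * 50, a 4x reduction, while still leaving plenty of replicas per server for the scenario the test exercises.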
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.java
index 8ea06e4..781872e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.java
@@ -21,24 +21,19 @@ import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+
 import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Locale;
-import java.util.Map;
 import java.util.TreeMap;
 import java.util.concurrent.atomic.AtomicInteger;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
@@ -47,8 +42,6 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
@@ -57,65 +50,22 @@ import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileTestUtil;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.ClassRule;
-import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.junit.rules.TestName;
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
 /**
  * Test cases for the "load" half of the HFileOutputFormat bulk load functionality. These tests run
  * faster than the full MR cluster tests in TestHFileOutputFormat
  */
 @Category({ MiscTests.class, LargeTests.class })
-public class TestLoadIncrementalHFiles {
+public class TestLoadIncrementalHFiles extends TestLoadIncrementalHFilesBase {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestLoadIncrementalHFiles.class);
 
-  @Rule
-  public TestName tn = new TestName();
-
-  private static final byte[] QUALIFIER = Bytes.toBytes("myqual");
-  private static final byte[] FAMILY = Bytes.toBytes("myfam");
-  private static final String NAMESPACE = "bulkNS";
-
-  static final String EXPECTED_MSG_FOR_NON_EXISTING_FAMILY = "Unmatched family names found";
-  static final int MAX_FILES_PER_REGION_PER_FAMILY = 4;
-
-  private static final byte[][] SPLIT_KEYS =
-      new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ppp") };
-
-  static HBaseTestingUtility util = new HBaseTestingUtility();
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
-    util.getConfiguration().setInt(LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
-      MAX_FILES_PER_REGION_PER_FAMILY);
-    // change default behavior so that tag values are returned with normal rpcs
-    util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
-      KeyValueCodecWithTags.class.getCanonicalName());
-    util.startMiniCluster();
-
-    setupNamespace();
-  }
-
-  protected static void setupNamespace() throws Exception {
-    util.getAdmin().createNamespace(NamespaceDescriptor.create(NAMESPACE).build());
-  }
-
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    util.shutdownMiniCluster();
-  }
-
   @Test
   public void testSimpleLoadWithMap() throws Exception {
     runTest("testSimpleLoadWithMap", BloomType.NONE,
@@ -145,36 +95,6 @@ public class TestLoadIncrementalHFiles {
   }
 
   /**
-   * Test case that creates some regions and loads HFiles that cross the boundaries of those regions
-   */
-  @Test
-  public void testRegionCrossingLoad() throws Exception {
-    runTest("testRegionCrossingLoad", BloomType.NONE,
-      new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("eee") },
-          new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, });
-  }
-
-  /**
-   * Test loading into a column family that has a ROW bloom filter.
-   */
-  @Test
-  public void testRegionCrossingRowBloom() throws Exception {
-    runTest("testRegionCrossingLoadRowBloom", BloomType.ROW,
-      new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("eee") },
-          new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, });
-  }
-
-  /**
-   * Test loading into a column family that has a ROWCOL bloom filter.
-   */
-  @Test
-  public void testRegionCrossingRowColBloom() throws Exception {
-    runTest("testRegionCrossingLoadRowColBloom", BloomType.ROWCOL,
-      new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("eee") },
-          new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, });
-  }
-
-  /**
    * Test case that creates some regions and loads HFiles that have different region boundaries than
    * the table pre-split.
    */
@@ -187,33 +107,6 @@ public class TestLoadIncrementalHFiles {
           new byte[][] { Bytes.toBytes("mmm"), Bytes.toBytes("zzz") }, });
   }
 
-  /**
-   * Test case that creates some regions and loads HFiles that cross the boundaries and have
-   * different region boundaries than the table pre-split.
-   */
-  @Test
-  public void testRegionCrossingHFileSplit() throws Exception {
-    testRegionCrossingHFileSplit(BloomType.NONE);
-  }
-
-  /**
-   * Test case that creates some regions and loads HFiles that cross the boundaries have a ROW bloom
-   * filter and a different region boundaries than the table pre-split.
-   */
-  @Test
-  public void testRegionCrossingHFileSplitRowBloom() throws Exception {
-    testRegionCrossingHFileSplit(BloomType.ROW);
-  }
-
-  /**
-   * Test case that creates some regions and loads HFiles that cross the boundaries have a ROWCOL
-   * bloom filter and a different region boundaries than the table pre-split.
-   */
-  @Test
-  public void testRegionCrossingHFileSplitRowColBloom() throws Exception {
-    testRegionCrossingHFileSplit(BloomType.ROWCOL);
-  }
-
   @Test
   public void testSplitALot() throws Exception {
     runTest("testSplitALot", BloomType.NONE,
@@ -226,187 +119,6 @@ public class TestLoadIncrementalHFiles {
       new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("zzz") }, });
   }
 
-  private void testRegionCrossingHFileSplit(BloomType bloomType) throws Exception {
-    runTest("testHFileSplit" + bloomType + "Bloom", bloomType,
-      new byte[][] { Bytes.toBytes("aaa"), Bytes.toBytes("fff"), Bytes.toBytes("jjj"),
-          Bytes.toBytes("ppp"), Bytes.toBytes("uuu"), Bytes.toBytes("zzz"), },
-      new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("eee") },
-          new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, });
-  }
-
-  private TableDescriptor buildHTD(TableName tableName, BloomType bloomType) {
-    return TableDescriptorBuilder.newBuilder(tableName)
-        .setColumnFamily(
-          ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setBloomFilterType(bloomType).build())
-        .build();
-  }
-
-  private void runTest(String testName, BloomType bloomType, byte[][][] hfileRanges)
-      throws Exception {
-    runTest(testName, bloomType, null, hfileRanges);
-  }
-
-  private void runTest(String testName, BloomType bloomType, byte[][][] hfileRanges, boolean useMap)
-      throws Exception {
-    runTest(testName, bloomType, null, hfileRanges, useMap);
-  }
-
-  private void runTest(String testName, BloomType bloomType, byte[][] tableSplitKeys,
-      byte[][][] hfileRanges) throws Exception {
-    runTest(testName, bloomType, tableSplitKeys, hfileRanges, false);
-  }
-
-  private void runTest(String testName, BloomType bloomType, byte[][] tableSplitKeys,
-      byte[][][] hfileRanges, boolean useMap) throws Exception {
-    final byte[] TABLE_NAME = Bytes.toBytes("mytable_" + testName);
-    final boolean preCreateTable = tableSplitKeys != null;
-
-    // Run the test bulkloading the table to the default namespace
-    final TableName TABLE_WITHOUT_NS = TableName.valueOf(TABLE_NAME);
-    runTest(testName, TABLE_WITHOUT_NS, bloomType, preCreateTable, tableSplitKeys, hfileRanges,
-      useMap, 2);
-
-
-    /* Run the test bulkloading the table from a depth of 3
-      directory structure is now
-      baseDirectory
-          -- regionDir
-            -- familyDir
-              -- storeFileDir
-    */
-    if (preCreateTable) {
-      runTest(testName + 2, TABLE_WITHOUT_NS, bloomType, true, tableSplitKeys, hfileRanges,
-          false, 3);
-    }
-
-    // Run the test bulkloading the table to the specified namespace
-    final TableName TABLE_WITH_NS = TableName.valueOf(Bytes.toBytes(NAMESPACE), TABLE_NAME);
-    runTest(testName, TABLE_WITH_NS, bloomType, preCreateTable, tableSplitKeys, hfileRanges,
-      useMap, 2);
-  }
-
-  private void runTest(String testName, TableName tableName, BloomType bloomType,
-      boolean preCreateTable, byte[][] tableSplitKeys, byte[][][] hfileRanges,
-      boolean useMap, int depth) throws Exception {
-    TableDescriptor htd = buildHTD(tableName, bloomType);
-    runTest(testName, htd, preCreateTable, tableSplitKeys, hfileRanges, useMap, false, depth);
-  }
-
-  public static int loadHFiles(String testName, TableDescriptor htd, HBaseTestingUtility util,
-      byte[] fam, byte[] qual, boolean preCreateTable, byte[][] tableSplitKeys,
-      byte[][][] hfileRanges, boolean useMap, boolean deleteFile, boolean copyFiles,
-      int initRowCount, int factor) throws Exception {
-    return loadHFiles(testName, htd, util, fam, qual, preCreateTable, tableSplitKeys, hfileRanges,
-        useMap, deleteFile, copyFiles, initRowCount, factor, 2);
-  }
-
-  public static int loadHFiles(String testName, TableDescriptor htd, HBaseTestingUtility util,
-      byte[] fam, byte[] qual, boolean preCreateTable, byte[][] tableSplitKeys,
-      byte[][][] hfileRanges, boolean useMap, boolean deleteFile, boolean copyFiles,
-      int initRowCount, int factor, int depth) throws Exception {
-    Path baseDirectory = util.getDataTestDirOnTestFS(testName);
-    FileSystem fs = util.getTestFileSystem();
-    baseDirectory = baseDirectory.makeQualified(fs.getUri(), fs.getWorkingDirectory());
-    Path parentDir = baseDirectory;
-    if (depth == 3) {
-      assert !useMap;
-      parentDir = new Path(baseDirectory, "someRegion");
-    }
-    Path familyDir = new Path(parentDir, Bytes.toString(fam));
-
-    int hfileIdx = 0;
-    Map<byte[], List<Path>> map = null;
-    List<Path> list = null;
-    if (useMap || copyFiles) {
-      list = new ArrayList<>();
-    }
-    if (useMap) {
-      map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-      map.put(fam, list);
-    }
-    Path last = null;
-    for (byte[][] range : hfileRanges) {
-      byte[] from = range[0];
-      byte[] to = range[1];
-      Path path = new Path(familyDir, "hfile_" + hfileIdx++);
-      HFileTestUtil.createHFile(util.getConfiguration(), fs, path, fam, qual, from, to, factor);
-      if (useMap) {
-        last = path;
-        list.add(path);
-      }
-    }
-    int expectedRows = hfileIdx * factor;
-
-    TableName tableName = htd.getTableName();
-    if (!util.getAdmin().tableExists(tableName) && (preCreateTable || map != null)) {
-      util.getAdmin().createTable(htd, tableSplitKeys);
-    }
-
-    Configuration conf = util.getConfiguration();
-    if (copyFiles) {
-      conf.setBoolean(LoadIncrementalHFiles.ALWAYS_COPY_FILES, true);
-    }
-    BulkLoadHFilesTool loader = new BulkLoadHFilesTool(conf);
-    List<String> args = Lists.newArrayList(baseDirectory.toString(), tableName.toString());
-    if (depth == 3) {
-      args.add("-loadTable");
-    }
-
-    if (useMap) {
-      if (deleteFile) {
-        fs.delete(last, true);
-      }
-      Map<BulkLoadHFiles.LoadQueueItem, ByteBuffer> loaded = loader.bulkLoad(tableName, map);
-      if (deleteFile) {
-        expectedRows -= 1000;
-        for (BulkLoadHFiles.LoadQueueItem item : loaded.keySet()) {
-          if (item.getFilePath().getName().equals(last.getName())) {
-            fail(last + " should be missing");
-          }
-        }
-      }
-    } else {
-      loader.run(args.toArray(new String[] {}));
-    }
-
-    if (copyFiles) {
-      for (Path p : list) {
-        assertTrue(p + " should exist", fs.exists(p));
-      }
-    }
-
-    Table table = util.getConnection().getTable(tableName);
-    try {
-      assertEquals(initRowCount + expectedRows, util.countRows(table));
-    } finally {
-      table.close();
-    }
-
-    return expectedRows;
-  }
-
-  private void runTest(String testName, TableDescriptor htd,
-      boolean preCreateTable, byte[][] tableSplitKeys, byte[][][] hfileRanges, boolean useMap,
-      boolean copyFiles, int depth) throws Exception {
-    loadHFiles(testName, htd, util, FAMILY, QUALIFIER, preCreateTable, tableSplitKeys, hfileRanges,
-      useMap, true, copyFiles, 0, 1000, depth);
-
-    final TableName tableName = htd.getTableName();
-    // verify staging folder has been cleaned up
-    Path stagingBasePath =
-        new Path(FSUtils.getRootDir(util.getConfiguration()), HConstants.BULKLOAD_STAGING_DIR_NAME);
-    FileSystem fs = util.getTestFileSystem();
-    if (fs.exists(stagingBasePath)) {
-      FileStatus[] files = fs.listStatus(stagingBasePath);
-      for (FileStatus file : files) {
-        assertTrue("Folder=" + file.getPath() + " is not cleaned up.",
-          file.getPath().getName() != "DONOTERASE");
-      }
-    }
-
-    util.deleteTable(tableName);
-  }
-
   /**
    * Test that tags survive through a bulk load that needs to split hfiles. This test depends on the
    * "hbase.client.rpc.codec" = KeyValueCodecWithTags so that the client can get tags in the
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles2.java
new file mode 100644
index 0000000..99cfe8b
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles2.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.tool;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Split from {@link TestLoadIncrementalHFiles}.
+ */
+@Category({ MiscTests.class, LargeTests.class })
+public class TestLoadIncrementalHFiles2 extends TestLoadIncrementalHFilesBase {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestLoadIncrementalHFiles2.class);
+
+  /**
+   * Test case that creates some regions and loads HFiles that cross the boundaries and have
+   * different region boundaries than the table pre-split.
+   */
+  @Test
+  public void testRegionCrossingHFileSplit() throws Exception {
+    testRegionCrossingHFileSplit(BloomType.NONE);
+  }
+
+  /**
+   * Test case that creates some regions and loads HFiles that cross the region boundaries, have
+   * a ROW bloom filter, and have different region boundaries than the table pre-split.
+   */
+  @Test
+  public void testRegionCrossingHFileSplitRowBloom() throws Exception {
+    testRegionCrossingHFileSplit(BloomType.ROW);
+  }
+
+  /**
+   * Test case that creates some regions and loads HFiles that cross the region boundaries, have
+   * a ROWCOL bloom filter, and have different region boundaries than the table pre-split.
+   */
+  @Test
+  public void testRegionCrossingHFileSplitRowColBloom() throws Exception {
+    testRegionCrossingHFileSplit(BloomType.ROWCOL);
+  }
+
+  private void testRegionCrossingHFileSplit(BloomType bloomType) throws Exception {
+    runTest("testHFileSplit" + bloomType + "Bloom", bloomType,
+        new byte[][] { Bytes.toBytes("aaa"), Bytes.toBytes("fff"), Bytes.toBytes("jjj"),
+            Bytes.toBytes("ppp"), Bytes.toBytes("uuu"), Bytes.toBytes("zzz"), },
+        new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("eee") },
+            new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, });
+  }
+
+  /**
+   * Test case that creates some regions and loads HFiles that cross the boundaries of those regions
+   */
+  @Test
+  public void testRegionCrossingLoad() throws Exception {
+    runTest("testRegionCrossingLoad", BloomType.NONE,
+        new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("eee") },
+            new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, });
+  }
+
+  /**
+   * Test loading into a column family that has a ROW bloom filter.
+   */
+  @Test
+  public void testRegionCrossingRowBloom() throws Exception {
+    runTest("testRegionCrossingLoadRowBloom", BloomType.ROW,
+        new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("eee") },
+            new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, });
+  }
+
+  /**
+   * Test loading into a column family that has a ROWCOL bloom filter.
+   */
+  @Test
+  public void testRegionCrossingRowColBloom() throws Exception {
+    runTest("testRegionCrossingLoadRowColBloom", BloomType.ROWCOL,
+        new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("eee") },
+            new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, });
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesBase.java
new file mode 100644
index 0000000..82dac05
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesBase.java
@@ -0,0 +1,284 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.tool;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.security.HadoopSecurityEnabledUserProviderForTesting;
+import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.security.access.AccessControlLists;
+import org.apache.hadoop.hbase.security.access.SecureTestUtil;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.HFileTestUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.rules.TestName;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
+/**
+ * Base class for the TestLoadIncrementalHFiles test classes. Holds the shared mini-cluster
+ * setup and the bulk-load helpers used by the split-out subclasses.
+ */
+public class TestLoadIncrementalHFilesBase {
+  @Rule
+  public TestName tn = new TestName();
+
+  protected static final byte[] QUALIFIER = Bytes.toBytes("myqual");
+  protected static final byte[] FAMILY = Bytes.toBytes("myfam");
+  private static final String NAMESPACE = "bulkNS";
+
+  static final String EXPECTED_MSG_FOR_NON_EXISTING_FAMILY = "Unmatched family names found";
+  static final int MAX_FILES_PER_REGION_PER_FAMILY = 4;
+
+  protected static final byte[][] SPLIT_KEYS =
+      new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ppp") };
+
+  static HBaseTestingUtility util = new HBaseTestingUtility();
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
+    util.getConfiguration().setInt(LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
+        MAX_FILES_PER_REGION_PER_FAMILY);
+    // change default behavior so that tag values are returned with normal rpcs
+    util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
+        KeyValueCodecWithTags.class.getCanonicalName());
+    util.startMiniCluster();
+
+    setupNamespace();
+  }
+
+  protected static void setupNamespace() throws Exception {
+    util.getAdmin().createNamespace(NamespaceDescriptor.create(NAMESPACE).build());
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    util.shutdownMiniCluster();
+  }
+
+  public static void secureSetUpBeforeClass() throws Exception {
+    // set the always on security provider
+    UserProvider.setUserProviderForTesting(util.getConfiguration(),
+        HadoopSecurityEnabledUserProviderForTesting.class);
+    // setup configuration
+    SecureTestUtil.enableSecurity(util.getConfiguration());
+    util.getConfiguration().setInt(LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
+        MAX_FILES_PER_REGION_PER_FAMILY);
+    // change default behavior so that tag values are returned with normal rpcs
+    util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
+        KeyValueCodecWithTags.class.getCanonicalName());
+
+    util.startMiniCluster();
+
+    // Wait for the ACL table to become available
+    util.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME);
+
+    setupNamespace();
+  }
+
+  protected void runTest(String testName, BloomType bloomType, byte[][][] hfileRanges)
+      throws Exception {
+    runTest(testName, bloomType, null, hfileRanges);
+  }
+
+  protected void runTest(String testName, BloomType bloomType, byte[][][] hfileRanges,
+      boolean useMap) throws Exception {
+    runTest(testName, bloomType, null, hfileRanges, useMap);
+  }
+
+  protected void runTest(String testName, BloomType bloomType, byte[][] tableSplitKeys,
+      byte[][][] hfileRanges) throws Exception {
+    runTest(testName, bloomType, tableSplitKeys, hfileRanges, false);
+  }
+
+  protected void runTest(String testName, BloomType bloomType, byte[][] tableSplitKeys,
+      byte[][][] hfileRanges, boolean useMap) throws Exception {
+    final byte[] TABLE_NAME = Bytes.toBytes("mytable_" + testName);
+    final boolean preCreateTable = tableSplitKeys != null;
+
+    // Run the test bulkloading the table to the default namespace
+    final TableName TABLE_WITHOUT_NS = TableName.valueOf(TABLE_NAME);
+    runTest(testName, TABLE_WITHOUT_NS, bloomType, preCreateTable, tableSplitKeys, hfileRanges,
+        useMap, 2);
+
+
+    /* Run the test bulkloading the table from a directory of depth 3;
+      the directory structure is now
+      baseDirectory
+          -- regionDir
+            -- familyDir
+              -- storeFileDir
+    */
+    if (preCreateTable) {
+      runTest(testName + 2, TABLE_WITHOUT_NS, bloomType, true, tableSplitKeys, hfileRanges,
+          false, 3);
+    }
+
+    // Run the test bulkloading the table to the specified namespace
+    final TableName TABLE_WITH_NS = TableName.valueOf(Bytes.toBytes(NAMESPACE), TABLE_NAME);
+    runTest(testName, TABLE_WITH_NS, bloomType, preCreateTable, tableSplitKeys, hfileRanges,
+        useMap, 2);
+  }
+
+  protected void runTest(String testName, TableName tableName, BloomType bloomType,
+      boolean preCreateTable, byte[][] tableSplitKeys, byte[][][] hfileRanges,
+      boolean useMap, int depth) throws Exception {
+    TableDescriptor htd = buildHTD(tableName, bloomType);
+    runTest(testName, htd, preCreateTable, tableSplitKeys, hfileRanges, useMap, false, depth);
+  }
+
+  protected void runTest(String testName, TableDescriptor htd,
+      boolean preCreateTable, byte[][] tableSplitKeys, byte[][][] hfileRanges, boolean useMap,
+      boolean copyFiles, int depth) throws Exception {
+    loadHFiles(testName, htd, util, FAMILY, QUALIFIER, preCreateTable, tableSplitKeys, hfileRanges,
+        useMap, true, copyFiles, 0, 1000, depth);
+
+    final TableName tableName = htd.getTableName();
+    // verify staging folder has been cleaned up
+    Path stagingBasePath =
+        new Path(FSUtils.getRootDir(util.getConfiguration()), HConstants.BULKLOAD_STAGING_DIR_NAME);
+    FileSystem fs = util.getTestFileSystem();
+    if (fs.exists(stagingBasePath)) {
+      FileStatus[] files = fs.listStatus(stagingBasePath);
+      for (FileStatus file : files) {
+        assertTrue("Folder=" + file.getPath() + " is not cleaned up.",
+            !file.getPath().getName().equals("DONOTERASE"));
+      }
+    }
+
+    util.deleteTable(tableName);
+  }
+
+  protected TableDescriptor buildHTD(TableName tableName, BloomType bloomType) {
+    return TableDescriptorBuilder.newBuilder(tableName)
+        .setColumnFamily(
+            ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setBloomFilterType(bloomType).build())
+        .build();
+  }
+
+  public static int loadHFiles(String testName, TableDescriptor htd, HBaseTestingUtility util,
+      byte[] fam, byte[] qual, boolean preCreateTable, byte[][] tableSplitKeys,
+      byte[][][] hfileRanges, boolean useMap, boolean deleteFile, boolean copyFiles,
+      int initRowCount, int factor, int depth) throws Exception {
+    Path baseDirectory = util.getDataTestDirOnTestFS(testName);
+    FileSystem fs = util.getTestFileSystem();
+    baseDirectory = baseDirectory.makeQualified(fs.getUri(), fs.getWorkingDirectory());
+    Path parentDir = baseDirectory;
+    if (depth == 3) {
+      assert !useMap;
+      parentDir = new Path(baseDirectory, "someRegion");
+    }
+    Path familyDir = new Path(parentDir, Bytes.toString(fam));
+
+    int hfileIdx = 0;
+    Map<byte[], List<Path>> map = null;
+    List<Path> list = null;
+    if (useMap || copyFiles) {
+      list = new ArrayList<>();
+    }
+    if (useMap) {
+      map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+      map.put(fam, list);
+    }
+    Path last = null;
+    for (byte[][] range : hfileRanges) {
+      byte[] from = range[0];
+      byte[] to = range[1];
+      Path path = new Path(familyDir, "hfile_" + hfileIdx++);
+      HFileTestUtil.createHFile(util.getConfiguration(), fs, path, fam, qual, from, to, factor);
+      if (useMap) {
+        last = path;
+        list.add(path);
+      }
+    }
+    int expectedRows = hfileIdx * factor;
+
+    TableName tableName = htd.getTableName();
+    if (!util.getAdmin().tableExists(tableName) && (preCreateTable || map != null)) {
+      util.getAdmin().createTable(htd, tableSplitKeys);
+    }
+
+    Configuration conf = util.getConfiguration();
+    if (copyFiles) {
+      conf.setBoolean(LoadIncrementalHFiles.ALWAYS_COPY_FILES, true);
+    }
+    BulkLoadHFilesTool loader = new BulkLoadHFilesTool(conf);
+    List<String> args = Lists.newArrayList(baseDirectory.toString(), tableName.toString());
+    if (depth == 3) {
+      args.add("-loadTable");
+    }
+
+    if (useMap) {
+      if (deleteFile) {
+        fs.delete(last, true);
+      }
+      Map<BulkLoadHFiles.LoadQueueItem, ByteBuffer> loaded = loader.bulkLoad(tableName, map);
+      if (deleteFile) {
+        expectedRows -= 1000;
+        for (BulkLoadHFiles.LoadQueueItem item : loaded.keySet()) {
+          if (item.getFilePath().getName().equals(last.getName())) {
+            fail(last + " should be missing");
+          }
+        }
+      }
+    } else {
+      loader.run(args.toArray(new String[] {}));
+    }
+
+    if (copyFiles) {
+      for (Path p : list) {
+        assertTrue(p + " should exist", fs.exists(p));
+      }
+    }
+
+    Table table = util.getConnection().getTable(tableName);
+    try {
+      assertEquals(initRowCount + expectedRows, util.countRows(table));
+    } finally {
+      table.close();
+    }
+
+    return expectedRows;
+  }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureLoadIncrementalHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureLoadIncrementalHFiles.java
index 4e10f01..6052783 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureLoadIncrementalHFiles.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureLoadIncrementalHFiles.java
@@ -18,12 +18,6 @@
 package org.apache.hadoop.hbase.tool;
 
 import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
-import org.apache.hadoop.hbase.security.HadoopSecurityEnabledUserProviderForTesting;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.security.access.AccessControlLists;
-import org.apache.hadoop.hbase.security.access.SecureTestUtil;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.junit.BeforeClass;
@@ -48,23 +42,6 @@ public class TestSecureLoadIncrementalHFiles extends TestLoadIncrementalHFiles {
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-    // set the always on security provider
-    UserProvider.setUserProviderForTesting(util.getConfiguration(),
-      HadoopSecurityEnabledUserProviderForTesting.class);
-    // setup configuration
-    SecureTestUtil.enableSecurity(util.getConfiguration());
-    util.getConfiguration().setInt(LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
-      MAX_FILES_PER_REGION_PER_FAMILY);
-    // change default behavior so that tag values are returned with normal rpcs
-    util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
-      KeyValueCodecWithTags.class.getCanonicalName());
-
-    util.startMiniCluster();
-
-    // Wait for the ACL table to become available
-    util.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME);
-
-    setupNamespace();
+    TestLoadIncrementalHFilesBase.secureSetUpBeforeClass();
   }
-
-}
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureLoadIncrementalHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureLoadIncrementalHFiles2.java
similarity index 61%
copy from hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureLoadIncrementalHFiles.java
copy to hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureLoadIncrementalHFiles2.java
index 4e10f01..99c5458 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureLoadIncrementalHFiles.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestSecureLoadIncrementalHFiles2.java
@@ -18,12 +18,6 @@
 package org.apache.hadoop.hbase.tool;
 
 import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
-import org.apache.hadoop.hbase.security.HadoopSecurityEnabledUserProviderForTesting;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.security.access.AccessControlLists;
-import org.apache.hadoop.hbase.security.access.SecureTestUtil;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.junit.BeforeClass;
@@ -40,31 +34,14 @@ import org.junit.experimental.categories.Category;
  * supported as part of a LoadIncrementalFiles call.
  */
 @Category({ MiscTests.class, LargeTests.class })
-public class TestSecureLoadIncrementalHFiles extends TestLoadIncrementalHFiles {
+public class TestSecureLoadIncrementalHFiles2 extends TestLoadIncrementalHFiles2 {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestSecureLoadIncrementalHFiles.class);
+      HBaseClassTestRule.forClass(TestSecureLoadIncrementalHFiles2.class);
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-    // set the always on security provider
-    UserProvider.setUserProviderForTesting(util.getConfiguration(),
-      HadoopSecurityEnabledUserProviderForTesting.class);
-    // setup configuration
-    SecureTestUtil.enableSecurity(util.getConfiguration());
-    util.getConfiguration().setInt(LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
-      MAX_FILES_PER_REGION_PER_FAMILY);
-    // change default behavior so that tag values are returned with normal rpcs
-    util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
-      KeyValueCodecWithTags.class.getCanonicalName());
-
-    util.startMiniCluster();
-
-    // Wait for the ACL table to become available
-    util.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME);
-
-    setupNamespace();
+    TestLoadIncrementalHFilesBase.secureSetUpBeforeClass();
   }
-
-}
+}
\ No newline at end of file
diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell3.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell3.java
new file mode 100644
index 0000000..265985d
--- /dev/null
+++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestAdminShell3.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.jruby.embed.PathType;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ ClientTests.class, LargeTests.class })
+public class TestAdminShell3 extends AbstractTestShell {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestAdminShell3.class);
+
+  @Test
+  public void testRunShellTests() throws IOException {
+    System.setProperty("shell.test.include", "admin3_test.rb");
+    // Start all ruby tests
+    jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb");
+  }
+}
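
The new shell test follows the existing wrapper pattern: a thin Java class sets the
shell.test.include system property so the JRuby-driven runner loads only the matching
ruby file. A minimal, self-contained illustration of that embedding pattern follows;
the RubySuiteRunner class name and the script path are hypothetical, and only the
org.jruby.embed calls mirror what TestAdminShell3 actually does.

    import org.jruby.embed.PathType;
    import org.jruby.embed.ScriptingContainer;

    // Illustrative stand-alone wrapper; not part of this commit.
    public class RubySuiteRunner {
      public static void main(String[] args) {
        // The ruby runner consults this property to pick which *_test.rb files to load.
        System.setProperty("shell.test.include", "admin3_test.rb");
        // Embed a JRuby interpreter and hand it the suite entry point.
        ScriptingContainer jruby = new ScriptingContainer();
        jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb");
      }
    }
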
diff --git a/hbase-shell/src/test/ruby/hbase/admin3_test.rb b/hbase-shell/src/test/ruby/hbase/admin3_test.rb
new file mode 100644
index 0000000..e1996a9
--- /dev/null
+++ b/hbase-shell/src/test/ruby/hbase/admin3_test.rb
@@ -0,0 +1,298 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require 'shell'
+require 'stringio'
+require 'hbase_constants'
+require 'hbase/hbase'
+require 'hbase/table'
+
+include HBaseConstants
+
+module Hbase
+  # Simple administration methods tests
+  class AdminRegionTest < Test::Unit::TestCase
+    include TestHelpers
+    def setup
+      setup_hbase
+      # Create test table if it does not exist
+      @test_name = "hbase_shell_tests_table"
+      drop_test_table(@test_name)
+      create_test_table(@test_name)
+    end
+
+    def teardown
+      shutdown
+    end
+
+    define_test "unassign should allow encoded region names" do
+      region = command(:locate_region, @test_name, '')
+      regionName = region.getRegionInfo().getRegionNameAsString()
+      command(:unassign, regionName, true)
+    end
+
+    define_test "unassign should allow non-encoded region names" do
+      region = command(:locate_region, @test_name, '')
+      encodedRegionName = region.getRegionInfo().getEncodedName()
+      command(:unassign, encodedRegionName, true)
+    end
+
+    define_test "list regions should allow table name" do
+      command(:list_regions, @test_name)
+    end
+
+    define_test 'merge regions' do
+      @t_name = 'hbase_shell_merge'
+      @t_name2 = 'hbase_shell_merge_2'
+      drop_test_table(@t_name)
+      drop_test_table(@t_name2)
+      admin.create(@t_name, 'a', NUMREGIONS => 10, SPLITALGO => 'HexStringSplit')
+      r1 = command(:locate_region, @t_name, '1')
+      r2 = command(:locate_region, @t_name, '2')
+      r3 = command(:locate_region, @t_name, '4')
+      r4 = command(:locate_region, @t_name, '5')
+      r5 = command(:locate_region, @t_name, '7')
+      r6 = command(:locate_region, @t_name, '8')
+      region1 = r1.getRegion.getRegionNameAsString
+      region2 = r2.getRegion.getRegionNameAsString
+      region3 = r3.getRegion.getRegionNameAsString
+      region4 = r4.getRegion.getRegionNameAsString
+      region5 = r5.getRegion.getRegionNameAsString
+      region6 = r6.getRegion.getRegionNameAsString
+      # only 1 region
+      assert_raise(ArgumentError) do
+        command(:merge_region, 'a')
+      end
+      # only 1 region with force=true
+      assert_raise(ArgumentError) do
+        command(:merge_region, 'a', true)
+      end
+      # non-existing region
+      assert_raise(RuntimeError) do
+        command(:merge_region, 'a','b')
+      end
+      # duplicate regions
+      assert_raise(RuntimeError) do
+        command(:merge_region, region1,region1,region1)
+      end
+      # 3 non-adjacent regions without forcible=true
+      assert_raise(RuntimeError) do
+        command(:merge_region, region1,region2,region4)
+      end
+      # 2 adjacent regions
+      command(:merge_region, region1,region2)
+      # 3 non-adjacent regions with forcible=true
+      command(:merge_region, region3,region5,region6, true)
+
+      admin.create(@t_name2, 'a', NUMREGIONS => 5, SPLITALGO => 'HexStringSplit')
+      r1 = command(:locate_region, @t_name2, '1')
+      r2 = command(:locate_region, @t_name2, '4')
+      r3 = command(:locate_region, @t_name2, '7')
+      region1 = r1.getRegion.getRegionNameAsString
+      region2 = r2.getRegion.getRegionNameAsString
+      region3 = r3.getRegion.getRegionNameAsString
+
+      # accept array of regions
+      command(:merge_region, [region1,region2,region3])
+    end
+  end
+
+  # Simple administration methods tests
+  # rubocop:disable Metrics/ClassLength
+  class AdminAlterTableTest < Test::Unit::TestCase
+    include TestHelpers
+
+    def setup
+      setup_hbase
+      # Create test table if it does not exist
+      @test_name = "hbase_shell_tests_table"
+      drop_test_table(@test_name)
+      create_test_table(@test_name)
+    end
+
+    def teardown
+      shutdown
+    end
+
+    #-------------------------------------------------------------------------------
+
+    define_test "alter should fail with non-string table names" do
+      assert_raise(ArgumentError) do
+        command(:alter, 123, METHOD => 'delete', NAME => 'y')
+      end
+    end
+
+    define_test "alter should fail with non-existing tables" do
+      assert_raise(ArgumentError) do
+        command(:alter, 'NOT.EXISTS', METHOD => 'delete', NAME => 'y')
+      end
+    end
+
+    define_test "alter should not fail with enabled tables" do
+      command(:enable, @test_name)
+      command(:alter, @test_name, METHOD => 'delete', NAME => 'y')
+    end
+
+    define_test "alter should be able to delete column families" do
+      assert_equal(['x:', 'y:'], table(@test_name).get_all_columns.sort)
+      command(:alter, @test_name, METHOD => 'delete', NAME => 'y')
+      command(:enable, @test_name)
+      assert_equal(['x:'], table(@test_name).get_all_columns.sort)
+    end
+
+    define_test "alter should be able to add column families" do
+      assert_equal(['x:', 'y:'], table(@test_name).get_all_columns.sort)
+      command(:alter, @test_name, NAME => 'z')
+      command(:enable, @test_name)
+      assert_equal(['x:', 'y:', 'z:'], table(@test_name).get_all_columns.sort)
+    end
+
+    define_test "alter should be able to add column families (name-only alter spec)" do
+      assert_equal(['x:', 'y:'], table(@test_name).get_all_columns.sort)
+      command(:alter, @test_name, 'z')
+      command(:enable, @test_name)
+      assert_equal(['x:', 'y:', 'z:'], table(@test_name).get_all_columns.sort)
+    end
+
+    define_test 'alter should support more than one alteration in one call' do
+      assert_equal(['x:', 'y:'], table(@test_name).get_all_columns.sort)
+      alter_out_put = capture_stdout do
+        command(:alter, @test_name, { NAME => 'z' },
+                { METHOD => 'delete', NAME => 'y' },
+                'MAX_FILESIZE' => 12_345_678)
+      end
+      command(:enable, @test_name)
+      assert_equal(1, /Updating all regions/.match(alter_out_put).size,
+                   "HBASE-15641 - Should only perform one table " \
+                   "modification per alter.")
+      assert_equal(['x:', 'z:'], table(@test_name).get_all_columns.sort)
+      assert_match(/12345678/, admin.describe(@test_name))
+    end
+
+    define_test 'alter should be able to set the TargetRegionSize and TargetRegionCount' do
+      command(:alter, @test_name, 'NORMALIZER_TARGET_REGION_COUNT' => 156)
+      assert_match(/156/, admin.describe(@test_name))
+      command(:alter, @test_name, 'NORMALIZER_TARGET_REGION_SIZE' => 234)
+      assert_match(/234/, admin.describe(@test_name))
+    end
+
+    define_test 'alter should support shortcut DELETE alter specs' do
+      assert_equal(['x:', 'y:'], table(@test_name).get_all_columns.sort)
+      command(:alter, @test_name, 'delete' => 'y')
+      assert_equal(['x:'], table(@test_name).get_all_columns.sort)
+    end
+
+    define_test "alter should be able to change table options" do
+      command(:alter, @test_name, METHOD => 'table_att', 'MAX_FILESIZE' => 12345678)
+      assert_match(/12345678/, admin.describe(@test_name))
+    end
+
+    define_test "alter should be able to change table options w/o table_att" do
+      command(:alter, @test_name, 'MAX_FILESIZE' => 12345678)
+      assert_match(/12345678/, admin.describe(@test_name))
+    end
+
+    define_test "alter should be able to change coprocessor attributes" do
+      drop_test_table(@test_name)
+      create_test_table(@test_name)
+
+      cp_key = "coprocessor"
+      class_name = "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver"
+
+      cp_value = "|" + class_name + "|12|arg1=1,arg2=2"
+
+      # eval() is used to convert a string to regex
+      assert_no_match(eval("/" + class_name + "/"), admin.describe(@test_name))
+      assert_no_match(eval("/" + cp_key + "/"), admin.describe(@test_name))
+      command(:alter, @test_name, 'METHOD' => 'table_att', cp_key => cp_value)
+      assert_match(eval("/" + class_name + "/"), admin.describe(@test_name))
+      assert_match(eval("/" + cp_key + "\\$(\\d+)/"), admin.describe(@test_name))
+    end
+
+    define_test "alter should be able to remove a table attribute" do
+      drop_test_table(@test_name)
+      create_test_table(@test_name)
+
+      key = "MAX_FILESIZE"
+      command(:alter, @test_name, 'METHOD' => 'table_att', key => 12345678)
+
+      # eval() is used to convert a string to regex
+      assert_match(eval("/" + key + "/"), admin.describe(@test_name))
+
+      command(:alter, @test_name, 'METHOD' => 'table_att_unset', 'NAME' => key)
+      assert_no_match(eval("/" + key + "/"), admin.describe(@test_name))
+    end
+
+    define_test "alter should be able to remove a list of table attributes" do
+      drop_test_table(@test_name)
+
+      key_1 = "TestAttr1"
+      key_2 = "TestAttr2"
+      command(:create, @test_name, { NAME => 'i'}, METADATA => { key_1 => 1, key_2 => 2 })
+
+      # eval() is used to convert a string to regex
+      assert_match(eval("/" + key_1 + "/"), admin.describe(@test_name))
+      assert_match(eval("/" + key_2 + "/"), admin.describe(@test_name))
+
+      command(:alter, @test_name, 'METHOD' => 'table_att_unset', 'NAME' => [ key_1, key_2 ])
+      assert_no_match(eval("/" + key_1 + "/"), admin.describe(@test_name))
+      assert_no_match(eval("/" + key_2 + "/"), admin.describe(@test_name))
+    end
+
+    define_test "alter should be able to remove a table configuration" do
+      drop_test_table(@test_name)
+      create_test_table(@test_name)
+
+      key = "TestConf"
+      command(:alter, @test_name, CONFIGURATION => {key => 1})
+
+      # eval() is used to convert a string to regex
+      assert_match(eval("/" + key + "/"), admin.describe(@test_name))
+
+      command(:alter, @test_name, 'METHOD' => 'table_conf_unset', 'NAME' => key)
+      assert_no_match(eval("/" + key + "/"), admin.describe(@test_name))
+    end
+
+    define_test "alter should be able to remove a list of table configuration" do
+      drop_test_table(@test_name)
+
+      key_1 = "TestConf1"
+      key_2 = "TestConf2"
+      command(:create, @test_name, { NAME => 'i'}, CONFIGURATION => { key_1 => 1, key_2 => 2 })
+
+      # eval() is used to convert a string to regex
+      assert_match(eval("/" + key_1 + "/"), admin.describe(@test_name))
+      assert_match(eval("/" + key_2 + "/"), admin.describe(@test_name))
+
+      command(:alter, @test_name, 'METHOD' => 'table_conf_unset', 'NAME' => [ key_1, key_2 ])
+      assert_no_match(eval("/" + key_1 + "/"), admin.describe(@test_name))
+      assert_no_match(eval("/" + key_2 + "/"), admin.describe(@test_name))
+    end
+
+    define_test "get_table should get a real table" do
+      drop_test_table(@test_name)
+      create_test_table(@test_name)
+
+      table = table(@test_name)
+      assert_not_equal(nil, table)
+      table.close
+    end
+  end
+  # rubocop:enable Metrics/ClassLength
+end
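
For readers following the merge coverage above from the Java side, a rough equivalent of
the shell's two-region 'merge_region region1, region2' is sketched below. It is a hedged
illustration: MergeExample, mergeFirstTwoRegions, and the table name are made up for the
example, and it assumes Admin#getRegions returns regions in start-key order so that the
first two entries are adjacent.

    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;

    // Illustrative only; not part of this commit.
    public class MergeExample {
      static void mergeFirstTwoRegions() throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          List<RegionInfo> regions =
            admin.getRegions(TableName.valueOf("hbase_shell_merge"));
          byte[] a = regions.get(0).getRegionName();
          byte[] b = regions.get(1).getRegionName();
          // forcible=false: the two regions must be adjacent, as the shell test asserts.
          admin.mergeRegionsAsync(a, b, false).get();
        }
      }
    }
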
diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb b/hbase-shell/src/test/ruby/hbase/admin_test.rb
index 8499fe3..7d4921f 100644
--- a/hbase-shell/src/test/ruby/hbase/admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb
@@ -528,274 +528,5 @@ module Hbase
       end
     end
   end
-
-  # Simple administration methods tests
-  class AdminRegionTest < Test::Unit::TestCase
-    include TestHelpers
-    def setup
-      setup_hbase
-      # Create test table if it does not exist
-      @test_name = "hbase_shell_tests_table"
-      drop_test_table(@test_name)
-      create_test_table(@test_name)
-    end
-
-    def teardown
-      shutdown
-    end
-
-    define_test "unassign should allow encoded region names" do
-      region = command(:locate_region, @test_name, '')
-      regionName = region.getRegionInfo().getRegionNameAsString()
-      command(:unassign, regionName, true)
-    end
-
-    define_test "unassign should allow non-encoded region names" do
-      region = command(:locate_region, @test_name, '')
-      encodedRegionName = region.getRegionInfo().getEncodedName()
-      command(:unassign, encodedRegionName, true)
-    end
-
-    define_test "list regions should allow table name" do
-      command(:list_regions, @test_name)
-    end
-
-    define_test 'merge regions' do
-      @t_name = 'hbase_shell_merge'
-      @t_name2 = 'hbase_shell_merge_2'
-      drop_test_table(@t_name)
-      drop_test_table(@t_name2)
-      admin.create(@t_name, 'a', NUMREGIONS => 10, SPLITALGO => 'HexStringSplit')
-      r1 = command(:locate_region, @t_name, '1')
-      r2 = command(:locate_region, @t_name, '2')
-      r3 = command(:locate_region, @t_name, '4')
-      r4 = command(:locate_region, @t_name, '5')
-      r5 = command(:locate_region, @t_name, '7')
-      r6 = command(:locate_region, @t_name, '8')
-      region1 = r1.getRegion.getRegionNameAsString
-      region2 = r2.getRegion.getRegionNameAsString
-      region3 = r3.getRegion.getRegionNameAsString
-      region4 = r4.getRegion.getRegionNameAsString
-      region5 = r5.getRegion.getRegionNameAsString
-      region6 = r6.getRegion.getRegionNameAsString
-      # only 1 region
-      assert_raise(ArgumentError) do
-        command(:merge_region, 'a')
-      end
-      # only 1 region with force=true
-      assert_raise(ArgumentError) do
-        command(:merge_region, 'a', true)
-      end
-      # non-existing region
-      assert_raise(RuntimeError) do
-        command(:merge_region, 'a','b')
-      end
-      # duplicate regions
-      assert_raise(RuntimeError) do
-        command(:merge_region, region1,region1,region1)
-      end
-      # 3 non-adjacent regions without forcible=true
-      assert_raise(RuntimeError) do
-        command(:merge_region, region1,region2,region4)
-      end
-      # 2 adjacent regions
-      command(:merge_region, region1,region2)
-      # 3 non-adjacent regions with forcible=true
-      command(:merge_region, region3,region5,region6, true)
-
-      admin.create(@t_name2, 'a', NUMREGIONS => 5, SPLITALGO => 'HexStringSplit')
-      r1 = command(:locate_region, @t_name2, '1')
-      r2 = command(:locate_region, @t_name2, '4')
-      r3 = command(:locate_region, @t_name2, '7')
-      region1 = r1.getRegion.getRegionNameAsString
-      region2 = r2.getRegion.getRegionNameAsString
-      region3 = r3.getRegion.getRegionNameAsString
-
-      # accept array of regions
-      command(:merge_region, [region1,region2,region3])
-    end
-  end
-
-  # Simple administration methods tests
-  # rubocop:disable Metrics/ClassLength
-  class AdminAlterTableTest < Test::Unit::TestCase
-    include TestHelpers
-
-    def setup
-      setup_hbase
-      # Create test table if it does not exist
-      @test_name = "hbase_shell_tests_table"
-      drop_test_table(@test_name)
-      create_test_table(@test_name)
-    end
-
-    def teardown
-      shutdown
-    end
-
-    #-------------------------------------------------------------------------------
-
-    define_test "alter should fail with non-string table names" do
-      assert_raise(ArgumentError) do
-        command(:alter, 123, METHOD => 'delete', NAME => 'y')
-      end
-    end
-
-    define_test "alter should fail with non-existing tables" do
-      assert_raise(ArgumentError) do
-        command(:alter, 'NOT.EXISTS', METHOD => 'delete', NAME => 'y')
-      end
-    end
-
-    define_test "alter should not fail with enabled tables" do
-      command(:enable, @test_name)
-      command(:alter, @test_name, METHOD => 'delete', NAME => 'y')
-    end
-
-    define_test "alter should be able to delete column families" do
-      assert_equal(['x:', 'y:'], table(@test_name).get_all_columns.sort)
-      command(:alter, @test_name, METHOD => 'delete', NAME => 'y')
-      command(:enable, @test_name)
-      assert_equal(['x:'], table(@test_name).get_all_columns.sort)
-    end
-
-    define_test "alter should be able to add column families" do
-      assert_equal(['x:', 'y:'], table(@test_name).get_all_columns.sort)
-      command(:alter, @test_name, NAME => 'z')
-      command(:enable, @test_name)
-      assert_equal(['x:', 'y:', 'z:'], table(@test_name).get_all_columns.sort)
-    end
-
-    define_test "alter should be able to add column families (name-only alter spec)" do
-      assert_equal(['x:', 'y:'], table(@test_name).get_all_columns.sort)
-      command(:alter, @test_name, 'z')
-      command(:enable, @test_name)
-      assert_equal(['x:', 'y:', 'z:'], table(@test_name).get_all_columns.sort)
-    end
-
-    define_test 'alter should support more than one alteration in one call' do
-      assert_equal(['x:', 'y:'], table(@test_name).get_all_columns.sort)
-      alter_out_put = capture_stdout do
-        command(:alter, @test_name, { NAME => 'z' },
-                { METHOD => 'delete', NAME => 'y' },
-                'MAX_FILESIZE' => 12_345_678)
-      end
-      command(:enable, @test_name)
-      assert_equal(1, /Updating all regions/.match(alter_out_put).size,
-                   "HBASE-15641 - Should only perform one table
-                   modification per alter.")
-      assert_equal(['x:', 'z:'], table(@test_name).get_all_columns.sort)
-      assert_match(/12345678/, admin.describe(@test_name))
-    end
-
-    define_test 'alter should be able to set the TargetRegionSize and TargetRegionCount' do
-      command(:alter, @test_name, 'NORMALIZER_TARGET_REGION_COUNT' => 156)
-      assert_match(/156/, admin.describe(@test_name))
-      command(:alter, @test_name, 'NORMALIZER_TARGET_REGION_SIZE' => 234)
-      assert_match(/234/, admin.describe(@test_name))
-    end
-
-    define_test 'alter should support shortcut DELETE alter specs' do
-      assert_equal(['x:', 'y:'], table(@test_name).get_all_columns.sort)
-      command(:alter, @test_name, 'delete' => 'y')
-      assert_equal(['x:'], table(@test_name).get_all_columns.sort)
-    end
-
-    define_test "alter should be able to change table options" do
-      command(:alter, @test_name, METHOD => 'table_att', 'MAX_FILESIZE' => 12345678)
-      assert_match(/12345678/, admin.describe(@test_name))
-    end
-
-    define_test "alter should be able to change table options w/o table_att" do
-      command(:alter, @test_name, 'MAX_FILESIZE' => 12345678)
-      assert_match(/12345678/, admin.describe(@test_name))
-    end
-
-    define_test "alter should be able to change coprocessor attributes" do
-      drop_test_table(@test_name)
-      create_test_table(@test_name)
-
-      cp_key = "coprocessor"
-      class_name = "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver"
-
-      cp_value = "|" + class_name + "|12|arg1=1,arg2=2"
-
-      # eval() is used to convert a string to regex
-      assert_no_match(eval("/" + class_name + "/"), admin.describe(@test_name))
-      assert_no_match(eval("/" + cp_key + "/"), admin.describe(@test_name))
-      command(:alter, @test_name, 'METHOD' => 'table_att', cp_key => cp_value)
-      assert_match(eval("/" + class_name + "/"), admin.describe(@test_name))
-      assert_match(eval("/" + cp_key + "\\$(\\d+)/"), admin.describe(@test_name))
-    end
-
-    define_test "alter should be able to remove a table attribute" do
-      drop_test_table(@test_name)
-      create_test_table(@test_name)
-
-      key = "MAX_FILESIZE"
-      command(:alter, @test_name, 'METHOD' => 'table_att', key => 12345678)
-
-      # eval() is used to convert a string to regex
-      assert_match(eval("/" + key + "/"), admin.describe(@test_name))
-
-      command(:alter, @test_name, 'METHOD' => 'table_att_unset', 'NAME' => key)
-      assert_no_match(eval("/" + key + "/"), admin.describe(@test_name))
-    end
-
-    define_test "alter should be able to remove a list of table attributes" do
-      drop_test_table(@test_name)
-
-      key_1 = "TestAttr1"
-      key_2 = "TestAttr2"
-      command(:create, @test_name, { NAME => 'i'}, METADATA => { key_1 => 1, key_2 => 2 })
-
-      # eval() is used to convert a string to regex
-      assert_match(eval("/" + key_1 + "/"), admin.describe(@test_name))
-      assert_match(eval("/" + key_2 + "/"), admin.describe(@test_name))
-
-      command(:alter, @test_name, 'METHOD' => 'table_att_unset', 'NAME' => [ key_1, key_2 ])
-      assert_no_match(eval("/" + key_1 + "/"), admin.describe(@test_name))
-      assert_no_match(eval("/" + key_2 + "/"), admin.describe(@test_name))
-    end
-
-    define_test "alter should be able to remove a table configuration" do
-      drop_test_table(@test_name)
-      create_test_table(@test_name)
-
-      key = "TestConf"
-      command(:alter, @test_name, CONFIGURATION => {key => 1})
-
-      # eval() is used to convert a string to regex
-      assert_match(eval("/" + key + "/"), admin.describe(@test_name))
-
-      command(:alter, @test_name, 'METHOD' => 'table_conf_unset', 'NAME' => key)
-      assert_no_match(eval("/" + key + "/"), admin.describe(@test_name))
-    end
-
-    define_test "alter should be able to remove a list of table configuration" do
-      drop_test_table(@test_name)
-
-      key_1 = "TestConf1"
-      key_2 = "TestConf2"
-      command(:create, @test_name, { NAME => 'i'}, CONFIGURATION => { key_1 => 1, key_2 => 2 })
-
-      # eval() is used to convert a string to regex
-      assert_match(eval("/" + key_1 + "/"), admin.describe(@test_name))
-      assert_match(eval("/" + key_2 + "/"), admin.describe(@test_name))
-
-      command(:alter, @test_name, 'METHOD' => 'table_conf_unset', 'NAME' => [ key_1, key_2 ])
-      assert_no_match(eval("/" + key_1 + "/"), admin.describe(@test_name))
-      assert_no_match(eval("/" + key_2 + "/"), admin.describe(@test_name))
-    end
-
-    define_test "get_table should get a real table" do
-      drop_test_table(@test_name)
-      create_test_table(@test_name)
-
-      table = table(@test_name)
-      assert_not_equal(nil, table)
-      table.close
-    end
-  end
   # rubocop:enable Metrics/ClassLength
 end