Posted to commits@hbase.apache.org by st...@apache.org on 2017/05/25 06:32:06 UTC
[02/27] hbase git commit: Revert "HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi)" Revert a mistaken commit!!!
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java
index 8872c63..c5c6484 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java
@@ -22,7 +22,6 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
-import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
@@ -31,19 +30,18 @@ import org.apache.hadoop.hbase.ProcedureInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ModifyTableState;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
-import org.junit.rules.TestRule;
@Category({MasterTests.class, MediumTests.class})
public class TestModifyTableProcedure extends TestTableDDLProcedureBase {
- @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
- withLookingForStuckThread(true).build();
- @Rule public TestName name = new TestName();
+ @Rule
+ public TestName name = new TestName();
@Test(timeout=60000)
public void testModifyTable() throws Exception {
@@ -210,7 +208,8 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase {
new ModifyTableProcedure(procExec.getEnvironment(), htd));
// Restart the executor and execute the step twice
- MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId);
+ int numberOfSteps = ModifyTableState.values().length;
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps);
// Validate descriptor
HTableDescriptor currentHtd = UTIL.getAdmin().getTableDescriptor(tableName);
@@ -247,7 +246,8 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase {
new ModifyTableProcedure(procExec.getEnvironment(), htd));
// Restart the executor and execute the step twice
- MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId);
+ int numberOfSteps = ModifyTableState.values().length;
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps);
// Validate descriptor
HTableDescriptor currentHtd = UTIL.getAdmin().getTableDescriptor(tableName);
@@ -282,7 +282,7 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase {
long procId = procExec.submitProcedure(
new ModifyTableProcedure(procExec.getEnvironment(), htd));
- int numberOfSteps = 0; // failing at pre operation
+ int numberOfSteps = 1; // failing at pre operation
MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, numberOfSteps);
// cf2 should not be present
@@ -315,7 +315,7 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase {
new ModifyTableProcedure(procExec.getEnvironment(), htd));
// Restart the executor and rollback the step twice
- int numberOfSteps = 0; // failing at pre operation
+ int numberOfSteps = 1; // failing at pre operation
MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, numberOfSteps);
// cf2 should not be present
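The hunks above restore the pre-HBASE-14614 form of these tests: testRecoveryAndDoubleExecution takes an explicit step count derived from the ModifyTableState protobuf enum, and the rollback tests fail at step 1, the pre-operation step, rather than step 0. A minimal sketch of the restored recovery pattern, assuming the procExec and htd fixtures already built by the test; the same shape recurs in the restore-snapshot and truncate-table tests below.

    // Sketch: the step count is taken from the protobuf state enum, so the test
    // keeps working when steps are added, unlike a hardcoded constant.
    long procId = procExec.submitProcedure(
        new ModifyTableProcedure(procExec.getEnvironment(), htd));
    int numberOfSteps = ModifyTableState.values().length;
    MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps);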
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java
index 47b1248..e6e90ef 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java
@@ -18,16 +18,12 @@
package org.apache.hadoop.hbase.master.procedure;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.util.List;
import java.util.Random;
+import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
@@ -46,19 +42,17 @@ import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
-import org.junit.rules.TestRule;
import static org.junit.Assert.*;
@Category({MasterTests.class, MediumTests.class})
public class TestProcedureAdmin {
private static final Log LOG = LogFactory.getLog(TestProcedureAdmin.class);
- @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
- withLookingForStuckThread(true).build();
- @Rule public TestName name = new TestName();
protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+ @Rule
+ public TestName name = new TestName();
private static void setupConf(Configuration conf) {
conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java
index bed8b4f..9141e0f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java
@@ -18,17 +18,11 @@
package org.apache.hadoop.hbase.master.procedure;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-
import java.io.IOException;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ProcedureInfo;
@@ -53,7 +47,6 @@ import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
-import org.junit.rules.TestRule;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -63,8 +56,6 @@ import static org.junit.Assert.assertTrue;
@Category({MasterTests.class, MediumTests.class})
public class TestRestoreSnapshotProcedure extends TestTableDDLProcedureBase {
private static final Log LOG = LogFactory.getLog(TestRestoreSnapshotProcedure.class);
- @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
- withLookingForStuckThread(true).build();
protected final TableName snapshotTableName = TableName.valueOf("testRestoreSnapshot");
protected final byte[] CF1 = Bytes.toBytes("cf1");
@@ -211,7 +202,8 @@ public class TestRestoreSnapshotProcedure extends TestTableDDLProcedureBase {
new RestoreSnapshotProcedure(procExec.getEnvironment(), snapshotHTD, snapshot));
// Restart the executor and execute the step twice
- MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId);
+ int numberOfSteps = RestoreSnapshotState.values().length;
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps);
resetProcExecutorTestingKillFlag();
validateSnapshotRestore();
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java
index 8cee4d8..c6968d4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java
@@ -19,45 +19,48 @@
package org.apache.hadoop.hbase.master.procedure;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+
import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.util.Threads;
import org.junit.After;
import org.junit.Before;
-import org.junit.Ignore;
import org.junit.Test;
import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
-@Category({MasterTests.class, LargeTests.class})
+/**
+ * It used to first run with DLS and then DLR but HBASE-12751 broke DLR so we disabled it here.
+ */
+@Category(LargeTests.class)
+@RunWith(Parameterized.class)
public class TestServerCrashProcedure {
- private static final Log LOG = LogFactory.getLog(TestServerCrashProcedure.class);
-
- private HBaseTestingUtility util;
-
- private void setupConf(Configuration conf) {
- conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
- conf.set("hbase.balancer.tablesOnMaster", "none");
- conf.setInt("hbase.client.retries.number", 3);
+ // Ugly junit parameterization. I just want to pass false and then true but seems like needs
+ // to return sequences of two-element arrays.
+ @Parameters(name = "{index}: setting={0}")
+ public static Collection<Object []> data() {
+ return Arrays.asList(new Object[] [] {{Boolean.FALSE, -1}});
}
+ private final HBaseTestingUtility util = new HBaseTestingUtility();
+
@Before
public void setup() throws Exception {
- this.util = new HBaseTestingUtility();
- setupConf(this.util.getConfiguration());
this.util.startMiniCluster(3);
ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(
this.util.getHBaseCluster().getMaster().getMasterProcedureExecutor(), false);
@@ -68,27 +71,15 @@ public class TestServerCrashProcedure {
MiniHBaseCluster cluster = this.util.getHBaseCluster();
HMaster master = cluster == null? null: cluster.getMaster();
if (master != null && master.getMasterProcedureExecutor() != null) {
- ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(
- master.getMasterProcedureExecutor(), false);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(master.getMasterProcedureExecutor(),
+ false);
}
this.util.shutdownMiniCluster();
}
-
- @Test(timeout=60000)
- public void testCrashTargetRs() throws Exception {
- }
-
- @Test(timeout=60000)
- @Ignore // Fix for AMv2
- public void testRecoveryAndDoubleExecutionOnRsWithMeta() throws Exception {
- testRecoveryAndDoubleExecution(true);
- }
-
- @Test(timeout=60000)
- @Ignore // Fix for AMv2
- public void testRecoveryAndDoubleExecutionOnRsWithoutMeta() throws Exception {
- testRecoveryAndDoubleExecution(false);
+ public TestServerCrashProcedure(final Boolean b, final int ignore) {
+ this.util.getConfiguration().setBoolean("hbase.master.distributed.log.replay", b);
+ this.util.getConfiguration().setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
}
/**
@@ -96,49 +87,43 @@ public class TestServerCrashProcedure {
* needed state.
* @throws Exception
*/
- private void testRecoveryAndDoubleExecution(final boolean carryingMeta) throws Exception {
- final TableName tableName = TableName.valueOf(
- "testRecoveryAndDoubleExecution-carryingMeta-" + carryingMeta);
- final Table t = this.util.createTable(tableName, HBaseTestingUtility.COLUMNS,
- HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
- try {
+ @Test(timeout = 300000)
+ public void testRecoveryAndDoubleExecutionOnline() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOnline");
+ this.util.createTable(tableName, HBaseTestingUtility.COLUMNS,
+ HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
+ try (Table t = this.util.getConnection().getTable(tableName)) {
// Load the table with a bit of data so some logs to split and some edits in each region.
this.util.loadTable(t, HBaseTestingUtility.COLUMNS[0]);
- final int count = util.countRows(t);
- assertTrue("expected some rows", count > 0);
- final String checksum = util.checksumRows(t);
+ int count = util.countRows(t);
// Run the procedure executor outside the master so we can mess with it. Need to disable
// Master's running of the server crash processing.
- final HMaster master = this.util.getHBaseCluster().getMaster();
+ HMaster master = this.util.getHBaseCluster().getMaster();
final ProcedureExecutor<MasterProcedureEnv> procExec = master.getMasterProcedureExecutor();
master.setServerCrashProcessingEnabled(false);
- // find the first server that match the request and executes the test
- ServerName rsToKill = null;
- for (HRegionInfo hri: util.getHBaseAdmin().getTableRegions(tableName)) {
- final ServerName serverName = AssignmentTestingUtil.getServerHoldingRegion(util, hri);
- if (AssignmentTestingUtil.isServerHoldingMeta(util, serverName) == carryingMeta) {
- rsToKill = serverName;
- break;
- }
- }
- // kill the RS
- AssignmentTestingUtil.killRs(util, rsToKill);
+ // Kill a server. Master will notice but do nothing other than add it to list of dead servers.
+ HRegionServer hrs = this.util.getHBaseCluster().getRegionServer(0);
+ boolean carryingMeta = master.getAssignmentManager().isCarryingMeta(hrs.getServerName());
+ this.util.getHBaseCluster().killRegionServer(hrs.getServerName());
+ hrs.join();
+ // Wait until the expiration of the server has arrived at the master. We won't process it
+ // by queuing a ServerCrashProcedure because we have disabled crash processing... but wait
+ // here so ServerManager gets notice and adds expired server to appropriate queues.
+ while (!master.getServerManager().isServerDead(hrs.getServerName())) Threads.sleep(10);
// Now, reenable processing else we can't get a lock on the ServerCrashProcedure.
master.setServerCrashProcessingEnabled(true);
// Do some of the master processing of dead servers so when SCP runs, it has expected 'state'.
- master.getServerManager().moveFromOnlineToDeadServers(rsToKill);
+ master.getServerManager().moveFromOnlineToDeadServers(hrs.getServerName());
// Enable test flags and then queue the crash procedure.
ProcedureTestingUtility.waitNoProcedureRunning(procExec);
ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
- long procId = procExec.submitProcedure(new ServerCrashProcedure(
- procExec.getEnvironment(), rsToKill, true, carryingMeta));
+ long procId =
+ procExec.submitProcedure(new ServerCrashProcedure(
+ procExec.getEnvironment(), hrs.getServerName(), true, carryingMeta));
// Now run through the procedure twice crashing the executor on each step...
MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId);
// Assert all data came back.
assertEquals(count, util.countRows(t));
- assertEquals(checksum, util.checksumRows(t));
- } finally {
- t.close();
}
}
-}
+}
\ No newline at end of file
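The server-crash test goes back to a single parameterized online test: the meta/non-meta variants and the AssignmentTestingUtil helpers are dropped, and the class runs once per row returned by the @Parameters method, with distributed log replay switched through the constructor. A self-contained sketch of that JUnit 4 idiom follows; the class and test names are illustrative only, not part of the HBase sources.

    import java.util.Arrays;
    import java.util.Collection;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;
    import org.junit.runners.Parameterized.Parameters;

    @RunWith(Parameterized.class)
    public class ExampleParameterizedTest {
      // Each Object[] row becomes one full run of every @Test method in the class.
      @Parameters(name = "{index}: setting={0}")
      public static Collection<Object[]> data() {
        return Arrays.asList(new Object[][] {{Boolean.FALSE, -1}, {Boolean.TRUE, -1}});
      }

      private final boolean setting;

      // JUnit injects one row's values through the constructor before each run.
      public ExampleParameterizedTest(Boolean setting, int ignored) {
        this.setting = setting;
      }

      @Test
      public void testSomething() {
        // the body can branch on 'setting' to cover both configurations
      }
    }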
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitTableRegionProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitTableRegionProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitTableRegionProcedure.java
new file mode 100644
index 0000000..c3b910e
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitTableRegionProcedure.java
@@ -0,0 +1,420 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.CompactionState;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestSplitTableRegionProcedure {
+ private static final Log LOG = LogFactory.getLog(TestSplitTableRegionProcedure.class);
+
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static String ColumnFamilyName1 = "cf1";
+ private static String ColumnFamilyName2 = "cf2";
+
+ private static final int startRowNum = 11;
+ private static final int rowCount = 60;
+
+ @Rule
+ public TestName name = new TestName();
+
+ private static void setupConf(Configuration conf) {
+ conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+ conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
+ }
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(3);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ @Before
+ public void setup() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+
+ // Turn off balancer so it doesn't cut in and mess up our placements.
+ UTIL.getAdmin().setBalancerRunning(false, true);
+ // Turn off the meta scanner so it don't remove parent on us.
+ UTIL.getHBaseCluster().getMaster().setCatalogJanitorEnabled(false);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ for (HTableDescriptor htd: UTIL.getAdmin().listTables()) {
+ LOG.info("Tear down, remove table=" + htd.getTableName());
+ UTIL.deleteTable(htd.getTableName());
+ }
+ }
+
+ @Test(timeout=60000)
+ public void testSplitTableRegion() throws Exception {
+ final TableName tableName = TableName.valueOf(name.getMethodName());
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ HRegionInfo [] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2);
+ insertData(tableName);
+ int splitRowNum = startRowNum + rowCount / 2;
+ byte[] splitKey = Bytes.toBytes("" + splitRowNum);
+
+ assertTrue("not able to find a splittable region", regions != null);
+ assertTrue("not able to find a splittable region", regions.length == 1);
+
+ // Split region of the table
+ long procId = procExec.submitProcedure(
+ new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey));
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+
+ verify(tableName, splitRowNum);
+ }
+
+ @Test(timeout=60000)
+ public void testSplitTableRegionNoStoreFile() throws Exception {
+ final TableName tableName = TableName.valueOf(name.getMethodName());
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ HRegionInfo [] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2);
+ int splitRowNum = startRowNum + rowCount / 2;
+ byte[] splitKey = Bytes.toBytes("" + splitRowNum);
+
+ assertTrue("not able to find a splittable region", regions != null);
+ assertTrue("not able to find a splittable region", regions.length == 1);
+
+ // Split region of the table
+ long procId = procExec.submitProcedure(
+ new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey));
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+
+ assertTrue(UTIL.getMiniHBaseCluster().getRegions(tableName).size() == 2);
+ assertTrue(UTIL.countRows(tableName) == 0);
+ }
+
+ @Test(timeout=60000)
+ public void testSplitTableRegionUnevenDaughter() throws Exception {
+ final TableName tableName = TableName.valueOf(name.getMethodName());
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ HRegionInfo [] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2);
+ insertData(tableName);
+ // Split to two daughters with one of them only has 1 row
+ int splitRowNum = startRowNum + rowCount / 4;
+ byte[] splitKey = Bytes.toBytes("" + splitRowNum);
+
+ assertTrue("not able to find a splittable region", regions != null);
+ assertTrue("not able to find a splittable region", regions.length == 1);
+
+ // Split region of the table
+ long procId = procExec.submitProcedure(
+ new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey));
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+
+ verify(tableName, splitRowNum);
+ }
+
+ @Test(timeout=60000)
+ public void testSplitTableRegionEmptyDaughter() throws Exception {
+ final TableName tableName = TableName.valueOf(name.getMethodName());
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ HRegionInfo [] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2);
+ insertData(tableName);
+ // Split to two daughters with one of them only has 1 row
+ int splitRowNum = startRowNum + rowCount;
+ byte[] splitKey = Bytes.toBytes("" + splitRowNum);
+
+ assertTrue("not able to find a splittable region", regions != null);
+ assertTrue("not able to find a splittable region", regions.length == 1);
+
+ // Split region of the table
+ long procId = procExec.submitProcedure(
+ new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey));
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+
+ // Make sure one daughter has 0 rows.
+ List<HRegion> daughters = UTIL.getMiniHBaseCluster().getRegions(tableName);
+ assertTrue(daughters.size() == 2);
+ assertTrue(UTIL.countRows(tableName) == rowCount);
+ assertTrue(UTIL.countRows(daughters.get(0)) == 0 || UTIL.countRows(daughters.get(1)) == 0);
+ }
+
+ @Test(timeout=60000)
+ public void testSplitTableRegionDeletedRowsDaughter() throws Exception {
+ final TableName tableName = TableName.valueOf(name.getMethodName());
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ HRegionInfo [] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2);
+ insertData(tableName);
+ // Split to two daughters with one of them only has 1 row
+ int splitRowNum = rowCount;
+ deleteData(tableName, splitRowNum);
+ byte[] splitKey = Bytes.toBytes("" + splitRowNum);
+
+ assertTrue("not able to find a splittable region", regions != null);
+ assertTrue("not able to find a splittable region", regions.length == 1);
+
+ // Split region of the table
+ long procId = procExec.submitProcedure(
+ new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey));
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+
+ UTIL.getAdmin().majorCompact(tableName);
+ // waiting for the major compaction to complete
+ UTIL.waitFor(6000, new Waiter.Predicate<IOException>() {
+ @Override
+ public boolean evaluate() throws IOException {
+ return UTIL.getAdmin().getCompactionState(tableName) == CompactionState.NONE;
+ }
+ });
+
+ // Make sure one daughter has 0 rows.
+ List<HRegion> daughters = UTIL.getMiniHBaseCluster().getRegions(tableName);
+ assertTrue(daughters.size() == 2);
+ final int currentRowCount = splitRowNum - startRowNum;
+ assertTrue(UTIL.countRows(tableName) == currentRowCount);
+ assertTrue(UTIL.countRows(daughters.get(0)) == 0 || UTIL.countRows(daughters.get(1)) == 0);
+ }
+
+ @Test(timeout=60000)
+ public void testInvalidSplitKey() throws Exception {
+ final TableName tableName = TableName.valueOf(name.getMethodName());
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ HRegionInfo [] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2);
+ insertData(tableName);
+
+ assertTrue("not able to find a splittable region", regions != null);
+ assertTrue("not able to find a splittable region", regions.length == 1);
+
+ // Split region of the table with null split key
+ try {
+ long procId1 = procExec.submitProcedure(
+ new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], null));
+ ProcedureTestingUtility.waitProcedure(procExec, procId1);
+ fail("unexpected procedure start with invalid split-key");
+ } catch (DoNotRetryIOException e) {
+ LOG.debug("Expected Split procedure construction failure: " + e.getMessage());
+ }
+ }
+
+ @Test(timeout = 60000)
+ public void testRollbackAndDoubleExecution() throws Exception {
+ final TableName tableName = TableName.valueOf(name.getMethodName());
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ HRegionInfo [] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2);
+ insertData(tableName);
+ int splitRowNum = startRowNum + rowCount / 2;
+ byte[] splitKey = Bytes.toBytes("" + splitRowNum);
+
+ assertTrue("not able to find a splittable region", regions != null);
+ assertTrue("not able to find a splittable region", regions.length == 1);
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Split region of the table
+ long procId = procExec.submitProcedure(
+ new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey));
+
+ // Failing before SPLIT_TABLE_REGION_UPDATE_META we should trigger the
+ // rollback
+ // NOTE: the 5 (number before SPLIT_TABLE_REGION_UPDATE_META step) is
+ // hardcoded, so you have to look at this test at least once when you add a new step.
+ int numberOfSteps = 5;
+ MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps);
+ }
+
+ @Test(timeout=60000)
+ public void testRecoveryAndDoubleExecution() throws Exception {
+ final TableName tableName = TableName.valueOf(name.getMethodName());
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ HRegionInfo [] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, ColumnFamilyName1, ColumnFamilyName2);
+ insertData(tableName);
+ int splitRowNum = startRowNum + rowCount / 2;
+ byte[] splitKey = Bytes.toBytes("" + splitRowNum);
+
+ assertTrue("not able to find a splittable region", regions != null);
+ assertTrue("not able to find a splittable region", regions.length == 1);
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Split region of the table
+ long procId = procExec.submitProcedure(
+ new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey));
+
+ // Restart the executor and execute the step twice
+ int numberOfSteps = SplitTableRegionState.values().length;
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+
+ verify(tableName, splitRowNum);
+ }
+
+ private void insertData(final TableName tableName) throws IOException, InterruptedException {
+ Table t = UTIL.getConnection().getTable(tableName);
+ Put p;
+ for (int i= 0; i < rowCount / 2; i++) {
+ p = new Put(Bytes.toBytes("" + (startRowNum + i)));
+ p.addColumn(Bytes.toBytes(ColumnFamilyName1), Bytes.toBytes("q1"), Bytes.toBytes(i));
+ p.addColumn(Bytes.toBytes(ColumnFamilyName2), Bytes.toBytes("q2"), Bytes.toBytes(i));
+ t.put(p);
+ p = new Put(Bytes.toBytes("" + (startRowNum + rowCount - i - 1)));
+ p.addColumn(Bytes.toBytes(ColumnFamilyName1), Bytes.toBytes("q1"), Bytes.toBytes(i));
+ p.addColumn(Bytes.toBytes(ColumnFamilyName2), Bytes.toBytes("q2"), Bytes.toBytes(i));
+ t.put(p);
+ if (i % 5 == 0) {
+ UTIL.getAdmin().flush(tableName);
+ }
+ }
+ }
+
+ private void deleteData(
+ final TableName tableName,
+ final int startDeleteRowNum) throws IOException, InterruptedException {
+ Table t = UTIL.getConnection().getTable(tableName);
+ final int numRows = rowCount + startRowNum - startDeleteRowNum;
+ Delete d;
+ for (int i= startDeleteRowNum; i <= numRows + startDeleteRowNum; i++) {
+ d = new Delete(Bytes.toBytes("" + i));
+ t.delete(d);
+ if (i % 5 == 0) {
+ UTIL.getAdmin().flush(tableName);
+ }
+ }
+ }
+
+ private void verify(final TableName tableName, final int splitRowNum) throws IOException {
+ List<HRegion> daughters = UTIL.getMiniHBaseCluster().getRegions(tableName);
+ assertTrue(daughters.size() == 2);
+ LOG.info("Row Count = " + UTIL.countRows(tableName));
+ assertTrue(UTIL.countRows(tableName) == rowCount);
+ int startRow;
+ int numRows;
+ for (int i = 0; i < daughters.size(); i++) {
+ if (Bytes.compareTo(
+ daughters.get(i).getRegionInfo().getStartKey(), HConstants.EMPTY_BYTE_ARRAY) == 0) {
+ startRow = startRowNum; // first region
+ numRows = splitRowNum - startRowNum;
+ } else {
+ startRow = splitRowNum;
+ numRows = rowCount + startRowNum - splitRowNum;
+ }
+ verifyData(
+ daughters.get(i),
+ startRow,
+ numRows,
+ ColumnFamilyName1.getBytes(),
+ ColumnFamilyName2.getBytes());
+ }
+ }
+
+ private void verifyData(
+ final HRegion newReg,
+ final int startRow,
+ final int numRows,
+ final byte[]... families)
+ throws IOException {
+ for (int i = startRow; i < startRow + numRows; i++) {
+ byte[] row = Bytes.toBytes("" + i);
+ Get get = new Get(row);
+ Result result = newReg.get(get);
+ Cell[] raw = result.rawCells();
+ assertEquals(families.length, result.size());
+ for (int j = 0; j < families.length; j++) {
+ assertTrue(CellUtil.matchingRow(raw[j], row));
+ assertTrue(CellUtil.matchingFamily(raw[j], families[j]));
+ }
+ }
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+}
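Every recovery and rollback test in this new file drives the procedure through the same kill-and-restart harness. A condensed sketch of that lifecycle, assuming the procExec, regions and splitKey values prepared earlier in the test method:

    // 1. Quiesce the executor so no other procedure is in flight.
    ProcedureTestingUtility.waitNoProcedureRunning(procExec);
    // 2. Arm the flag that kills the executor before each state is persisted,
    //    forcing a restart and replay of the procedure at every step.
    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
    // 3. Submit the procedure under test.
    long procId = procExec.submitProcedure(
        new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey));
    // 4. Re-execute each step twice across restarts, then check the outcome.
    int numberOfSteps = SplitTableRegionState.values().length;
    MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps);
    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);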
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDDLProcedureBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDDLProcedureBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDDLProcedureBase.java
index f7b4100..f453a67 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDDLProcedureBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDDLProcedureBase.java
@@ -23,7 +23,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
import org.junit.After;
@@ -76,10 +75,6 @@ public abstract class TestTableDDLProcedureBase {
}
protected ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
- return getMaster().getMasterProcedureExecutor();
- }
-
- protected HMaster getMaster() {
- return UTIL.getHBaseCluster().getMaster();
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
index 22583d3..6d9475f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
@@ -18,12 +18,8 @@
package org.apache.hadoop.hbase.master.procedure;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ProcedureInfo;
import org.apache.hadoop.hbase.TableName;
@@ -38,7 +34,6 @@ import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
-import org.junit.rules.TestRule;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -46,8 +41,6 @@ import static org.junit.Assert.assertTrue;
@Category({MasterTests.class, MediumTests.class})
public class TestTruncateTableProcedure extends TestTableDDLProcedureBase {
private static final Log LOG = LogFactory.getLog(TestTruncateTableProcedure.class);
- @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
- withLookingForStuckThread(true).build();
@Rule
public TestName name = new TestName();
@@ -178,7 +171,9 @@ public class TestTruncateTableProcedure extends TestTableDDLProcedureBase {
new TruncateTableProcedure(procExec.getEnvironment(), tableName, preserveSplits));
// Restart the executor and execute the step twice
- MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId);
+ // NOTE: the 7 (number of TruncateTableState steps) is hardcoded,
+ // so you have to look at this test at least once when you add a new step.
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, 7);
ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
UTIL.waitUntilAllRegionsAssigned(tableName);
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
index bc7f32a..2fb4741 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
@@ -28,10 +28,8 @@ import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
+import java.util.Set;
import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
@@ -42,6 +40,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
@@ -56,9 +55,9 @@ import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
-import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
@@ -67,10 +66,13 @@ import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
import org.apache.hadoop.hbase.mapreduce.TableInputFormatBase;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.TableNamespaceManager;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.quotas.QuotaExceededException;
import org.apache.hadoop.hbase.quotas.QuotaUtil;
+import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
@@ -78,7 +80,6 @@ import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.Threads;
import org.apache.zookeeper.KeeperException;
import org.junit.After;
import org.junit.AfterClass;
@@ -88,6 +89,8 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestRule;
+import com.google.common.collect.Sets;
+
@Category(MediumTests.class)
public class TestNamespaceAuditor {
@Rule public final TestRule timeout = CategoryBasedTimeout.builder().
@@ -311,10 +314,19 @@ public class TestNamespaceAuditor {
shouldFailMerge = fail;
}
+ private boolean triggered = false;
+
+ public synchronized void waitUtilTriggered() throws InterruptedException {
+ while (!triggered) {
+ wait();
+ }
+ }
+
@Override
public synchronized void preMergeRegionsAction(
final ObserverContext<MasterCoprocessorEnvironment> ctx,
final HRegionInfo[] regionsToMerge) throws IOException {
+ triggered = true;
notifyAll();
if (shouldFailMerge) {
throw new IOException("fail merge");
@@ -325,16 +337,16 @@ public class TestNamespaceAuditor {
@Test
public void testRegionMerge() throws Exception {
String nsp1 = prefix + "_regiontest";
- final int initialRegions = 3;
NamespaceDescriptor nspDesc =
NamespaceDescriptor.create(nsp1)
- .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "" + initialRegions)
+ .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "3")
.addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
ADMIN.createNamespace(nspDesc);
final TableName tableTwo = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table2");
byte[] columnFamily = Bytes.toBytes("info");
HTableDescriptor tableDescOne = new HTableDescriptor(tableTwo);
tableDescOne.addFamily(new HColumnDescriptor(columnFamily));
+ final int initialRegions = 3;
ADMIN.createTable(tableDescOne, Bytes.toBytes("1"), Bytes.toBytes("2000"), initialRegions);
Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
try (Table table = connection.getTable(tableTwo)) {
@@ -342,41 +354,102 @@ public class TestNamespaceAuditor {
}
ADMIN.flush(tableTwo);
List<HRegionInfo> hris = ADMIN.getTableRegions(tableTwo);
- assertEquals(initialRegions, hris.size());
Collections.sort(hris);
- Future<?> f = ADMIN.mergeRegionsAsync(
+ // merge the two regions
+ final Set<String> encodedRegionNamesToMerge =
+ Sets.newHashSet(hris.get(0).getEncodedName(), hris.get(1).getEncodedName());
+ ADMIN.mergeRegionsAsync(
hris.get(0).getEncodedNameAsBytes(),
hris.get(1).getEncodedNameAsBytes(),
false);
- f.get(10, TimeUnit.SECONDS);
+ UTIL.waitFor(10000, 100, new Waiter.ExplainingPredicate<Exception>() {
+
+ @Override
+ public boolean evaluate() throws Exception {
+ RegionStates regionStates =
+ UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
+ for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
+ if (encodedRegionNamesToMerge.contains(hri.getEncodedName())) {
+ return false;
+ }
+ if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
+ return false;
+ }
+ }
+ return true;
+ }
+ @Override
+ public String explainFailure() throws Exception {
+ RegionStates regionStates =
+ UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
+ for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
+ if (encodedRegionNamesToMerge.contains(hri.getEncodedName())) {
+ return hri + " which is expected to be merged is still online";
+ }
+ if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
+ return hri + " is still in not opened";
+ }
+ }
+ return "Unknown";
+ }
+ });
hris = ADMIN.getTableRegions(tableTwo);
assertEquals(initialRegions - 1, hris.size());
Collections.sort(hris);
+
+ final HRegionInfo hriToSplit = hris.get(1);
ADMIN.split(tableTwo, Bytes.toBytes("500"));
- // Not much we can do here until we have split return a Future.
- Threads.sleep(5000);
+
+ UTIL.waitFor(10000, 100, new Waiter.ExplainingPredicate<Exception>() {
+
+ @Override
+ public boolean evaluate() throws Exception {
+ RegionStates regionStates =
+ UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
+ for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
+ if (hri.getEncodedName().equals(hriToSplit.getEncodedName())) {
+ return false;
+ }
+ if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public String explainFailure() throws Exception {
+ RegionStates regionStates =
+ UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
+ for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
+ if (hri.getEncodedName().equals(hriToSplit.getEncodedName())) {
+ return hriToSplit + " which is expected to be split is still online";
+ }
+ if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
+ return hri + " is still in not opened";
+ }
+ }
+ return "Unknown";
+ }
+ });
hris = ADMIN.getTableRegions(tableTwo);
assertEquals(initialRegions, hris.size());
Collections.sort(hris);
- // Fail region merge through Coprocessor hook
+ // fail region merge through Coprocessor hook
MiniHBaseCluster cluster = UTIL.getHBaseCluster();
MasterCoprocessorHost cpHost = cluster.getMaster().getMasterCoprocessorHost();
Coprocessor coprocessor = cpHost.findCoprocessor(CPMasterObserver.class.getName());
CPMasterObserver masterObserver = (CPMasterObserver) coprocessor;
masterObserver.failMerge(true);
+ masterObserver.triggered = false;
- f = ADMIN.mergeRegionsAsync(
+ ADMIN.mergeRegionsAsync(
hris.get(1).getEncodedNameAsBytes(),
hris.get(2).getEncodedNameAsBytes(),
false);
- try {
- f.get(10, TimeUnit.SECONDS);
- fail("Merge was supposed to fail!");
- } catch (ExecutionException ee) {
- // Expected.
- }
+ masterObserver.waitUtilTriggered();
hris = ADMIN.getTableRegions(tableTwo);
assertEquals(initialRegions, hris.size());
Collections.sort(hris);
@@ -388,6 +461,67 @@ public class TestNamespaceAuditor {
assertEquals(initialRegions, ADMIN.getTableRegions(tableTwo).size());
}
+ @Test
+ public void testRegionOperations() throws Exception {
+ String nsp1 = prefix + "_regiontest";
+ NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp1)
+ .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "2")
+ .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
+ ADMIN.createNamespace(nspDesc);
+ boolean constraintViolated = false;
+ final TableName tableOne = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table1");
+ byte[] columnFamily = Bytes.toBytes("info");
+ HTableDescriptor tableDescOne = new HTableDescriptor(tableOne);
+ tableDescOne.addFamily(new HColumnDescriptor(columnFamily));
+ NamespaceTableAndRegionInfo stateInfo;
+ try {
+ ADMIN.createTable(tableDescOne, Bytes.toBytes("1"), Bytes.toBytes("1000"), 7);
+ } catch (Exception exp) {
+ assertTrue(exp instanceof DoNotRetryIOException);
+ LOG.info(exp);
+ constraintViolated = true;
+ } finally {
+ assertTrue(constraintViolated);
+ }
+ assertFalse(ADMIN.tableExists(tableOne));
+ // This call will pass.
+ ADMIN.createTable(tableDescOne);
+ Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
+ Table htable = connection.getTable(tableOne);
+ UTIL.loadNumericRows(htable, Bytes.toBytes("info"), 1, 1000);
+ ADMIN.flush(tableOne);
+ stateInfo = getNamespaceState(nsp1);
+ assertEquals(1, stateInfo.getTables().size());
+ assertEquals(1, stateInfo.getRegionCount());
+ restartMaster();
+
+ HRegion actualRegion = UTIL.getHBaseCluster().getRegions(tableOne).get(0);
+ CustomObserver observer = (CustomObserver) actualRegion.getCoprocessorHost().findCoprocessor(
+ CustomObserver.class.getName());
+ assertNotNull(observer);
+
+ ADMIN.split(tableOne, Bytes.toBytes("500"));
+ observer.postSplit.await();
+ assertEquals(2, ADMIN.getTableRegions(tableOne).size());
+ actualRegion = UTIL.getHBaseCluster().getRegions(tableOne).get(0);
+ observer = (CustomObserver) actualRegion.getCoprocessorHost().findCoprocessor(
+ CustomObserver.class.getName());
+ assertNotNull(observer);
+
+ //Before we go on split, we should remove all reference store files.
+ ADMIN.compact(tableOne);
+ observer.postCompact.await();
+
+ ADMIN.split(tableOne, getSplitKey(actualRegion.getRegionInfo().getStartKey(),
+ actualRegion.getRegionInfo().getEndKey()));
+ observer.postSplit.await();
+ // Make sure no regions have been added.
+ List<HRegionInfo> hris = ADMIN.getTableRegions(tableOne);
+ assertEquals(2, hris.size());
+
+ htable.close();
+ }
+
/*
* Create a table and make sure that the table creation fails after adding this table entry into
* namespace quota cache. Now correct the failure and recreate the table with same name.
@@ -457,9 +591,16 @@ public class TestNamespaceAuditor {
}
public static class CustomObserver implements RegionObserver {
+ volatile CountDownLatch postSplit;
volatile CountDownLatch postCompact;
@Override
+ public void postCompleteSplit(ObserverContext<RegionCoprocessorEnvironment> ctx)
+ throws IOException {
+ postSplit.countDown();
+ }
+
+ @Override
public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e,
Store store, StoreFile resultFile) throws IOException {
postCompact.countDown();
@@ -467,6 +608,7 @@ public class TestNamespaceAuditor {
@Override
public void start(CoprocessorEnvironment e) throws IOException {
+ postSplit = new CountDownLatch(1);
postCompact = new CountDownLatch(1);
}
}
@@ -587,7 +729,7 @@ public class TestNamespaceAuditor {
ADMIN.createTable(tableDescOne);
ADMIN.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 4);
}
-
+
@Test(expected = QuotaExceededException.class)
public void testCloneSnapshotQuotaExceed() throws Exception {
String nsp = prefix + "_testTableQuotaExceedWithCloneSnapshot";
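In testRegionMerge above, waiting on the Future returned by mergeRegionsAsync is replaced by polling with UTIL.waitFor and a Waiter.ExplainingPredicate, which keeps evaluating the condition until it holds or the timeout expires, and only then reports the explanation. A minimal sketch of that polling idiom; conditionHolds() and the message text are placeholders, not HBase API:

    UTIL.waitFor(10000, 100, new Waiter.ExplainingPredicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        // return true once the state under test has been reached
        return conditionHolds();
      }

      @Override
      public String explainFailure() throws Exception {
        // consulted only if the timeout elapses; included in the failure message
        return "condition still not satisfied";
      }
    });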
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java
index 8eb2e58..296b38f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java
@@ -35,7 +35,7 @@ import org.apache.zookeeper.KeeperException;
public class SimpleMasterProcedureManager extends MasterProcedureManager {
- public static final String SIMPLE_SIGNATURE = "simple_test";
+ public static final String SIMPLE_SIGNATURE = "simle_test";
public static final String SIMPLE_DATA = "simple_test_data";
private static final Log LOG = LogFactory.getLog(SimpleMasterProcedureManager.class);
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java
index 09fb01f..f6dc8c0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java
@@ -77,10 +77,9 @@ public class TestCompactSplitThread {
// block writes if we get to blockingStoreFiles store files
conf.setInt("hbase.hstore.blockingStoreFiles", blockingStoreFiles);
// Ensure no extra cleaners on by default (e.g. TimeToLiveHFileCleaner)
- conf.setInt(CompactSplit.LARGE_COMPACTION_THREADS, 3);
- conf.setInt(CompactSplit.SMALL_COMPACTION_THREADS, 4);
- conf.setInt(CompactSplit.SPLIT_THREADS, 5);
- conf.setInt(CompactSplit.MERGE_THREADS, 6);
+ conf.setInt(CompactSplitThread.LARGE_COMPACTION_THREADS, 3);
+ conf.setInt(CompactSplitThread.SMALL_COMPACTION_THREADS, 4);
+ conf.setInt(CompactSplitThread.SPLIT_THREADS, 5);
}
@After
@@ -115,10 +114,9 @@ public class TestCompactSplitThread {
assertEquals(5, regionServer.compactSplitThread.getSplitThreadNum());
// change bigger configurations and do online update
- conf.setInt(CompactSplit.LARGE_COMPACTION_THREADS, 4);
- conf.setInt(CompactSplit.SMALL_COMPACTION_THREADS, 5);
- conf.setInt(CompactSplit.SPLIT_THREADS, 6);
- conf.setInt(CompactSplit.MERGE_THREADS, 7);
+ conf.setInt(CompactSplitThread.LARGE_COMPACTION_THREADS, 4);
+ conf.setInt(CompactSplitThread.SMALL_COMPACTION_THREADS, 5);
+ conf.setInt(CompactSplitThread.SPLIT_THREADS, 6);
try {
regionServer.compactSplitThread.onConfigurationChange(conf);
} catch (IllegalArgumentException iae) {
@@ -131,10 +129,9 @@ public class TestCompactSplitThread {
assertEquals(6, regionServer.compactSplitThread.getSplitThreadNum());
// change smaller configurations and do online update
- conf.setInt(CompactSplit.LARGE_COMPACTION_THREADS, 2);
- conf.setInt(CompactSplit.SMALL_COMPACTION_THREADS, 3);
- conf.setInt(CompactSplit.SPLIT_THREADS, 4);
- conf.setInt(CompactSplit.MERGE_THREADS, 5);
+ conf.setInt(CompactSplitThread.LARGE_COMPACTION_THREADS, 2);
+ conf.setInt(CompactSplitThread.SMALL_COMPACTION_THREADS, 3);
+ conf.setInt(CompactSplitThread.SPLIT_THREADS, 4);
try {
regionServer.compactSplitThread.onConfigurationChange(conf);
} catch (IllegalArgumentException iae) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
index 747fd54..5f4c0aa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
@@ -294,7 +294,7 @@ public class TestCompaction {
// setup a compact/split thread on a mock server
HRegionServer mockServer = Mockito.mock(HRegionServer.class);
Mockito.when(mockServer.getConfiguration()).thenReturn(r.getBaseConf());
- CompactSplit thread = new CompactSplit(mockServer);
+ CompactSplitThread thread = new CompactSplitThread(mockServer);
Mockito.when(mockServer.getCompactSplitThread()).thenReturn(thread);
// setup a region/store with some files
@@ -318,7 +318,7 @@ public class TestCompaction {
// setup a compact/split thread on a mock server
HRegionServer mockServer = Mockito.mock(HRegionServer.class);
Mockito.when(mockServer.getConfiguration()).thenReturn(r.getBaseConf());
- CompactSplit thread = new CompactSplit(mockServer);
+ CompactSplitThread thread = new CompactSplitThread(mockServer);
Mockito.when(mockServer.getCompactSplitThread()).thenReturn(thread);
// setup a region/store with some files
@@ -357,7 +357,7 @@ public class TestCompaction {
/**
* HBASE-7947: Regression test to ensure adding to the correct list in the
- * {@link CompactSplit}
+ * {@link CompactSplitThread}
* @throws Exception on failure
*/
@Test
@@ -365,7 +365,7 @@ public class TestCompaction {
// setup a compact/split thread on a mock server
HRegionServer mockServer = Mockito.mock(HRegionServer.class);
Mockito.when(mockServer.getConfiguration()).thenReturn(r.getBaseConf());
- CompactSplit thread = new CompactSplit(mockServer);
+ CompactSplitThread thread = new CompactSplitThread(mockServer);
Mockito.when(mockServer.getCompactSplitThread()).thenReturn(thread);
// setup a region/store with some files
@@ -548,7 +548,7 @@ public class TestCompaction {
when(mockServer.isStopped()).thenReturn(false);
when(mockServer.getConfiguration()).thenReturn(conf);
when(mockServer.getChoreService()).thenReturn(new ChoreService("test"));
- CompactSplit cst = new CompactSplit(mockServer);
+ CompactSplitThread cst = new CompactSplitThread(mockServer);
when(mockServer.getCompactSplitThread()).thenReturn(cst);
//prevent large compaction thread pool stealing job from small compaction queue.
cst.shutdownLongCompactions();
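Several of the hunks above repeat the same scaffolding around the restored CompactSplitThread constructor: a Mockito-mocked HRegionServer that hands back a configuration, a chore service, and the thread itself. Consolidated into one helper, and with the caveat that the class and method names are illustrative and a plain HBaseConfiguration stands in for the region's base configuration used by the test, the setup looks roughly like this:

package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.mockito.Mockito;

public class MockCompactSplitSketch {
  // Stand up a CompactSplitThread against a mocked region server so compaction
  // requests can be queued without a real cluster, as the tests above do.
  static CompactSplitThread mockedCompactSplit() {
    Configuration conf = HBaseConfiguration.create();
    HRegionServer mockServer = Mockito.mock(HRegionServer.class);
    Mockito.when(mockServer.isStopped()).thenReturn(false);
    Mockito.when(mockServer.getConfiguration()).thenReturn(conf);
    Mockito.when(mockServer.getChoreService()).thenReturn(new ChoreService("test"));
    CompactSplitThread cst = new CompactSplitThread(mockServer);
    Mockito.when(mockServer.getCompactSplitThread()).thenReturn(cst);
    // The queue test also shuts down the long-compaction pool so the large pool
    // cannot steal work from the small compaction queue.
    cst.shutdownLongCompactions();
    return cst;
  }
}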
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java
index 430aef5..4264863 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.util.TestTableName;
import org.junit.After;
import org.junit.Before;
-import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -55,10 +54,6 @@ import org.junit.experimental.categories.Category;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
-/**
- * HBASE-13651 Handle StoreFileScanner FileNotFoundException
- */
-@Ignore
@Category({MasterTests.class, LargeTests.class})
public class TestCorruptedRegionStoreFile {
private static final Log LOG = LogFactory.getLog(TestCorruptedRegionStoreFile.class);
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
index 0aa39f6..88bbffb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
@@ -107,8 +107,8 @@ public class TestHRegionFileSystem {
// alter through setting HStore#BLOCK_STORAGE_POLICY_KEY in HColumnDescriptor
hcdA.setValue(HStore.BLOCK_STORAGE_POLICY_KEY, "ONE_SSD");
admin.modifyColumnFamily(TABLE_NAME, hcdA);
- while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().
- getRegionStates().hasRegionsInTransition()) {
+ while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
+ .isRegionsInTransition()) {
Thread.sleep(200);
LOG.debug("Waiting on table to finish schema altering");
}
@@ -117,7 +117,7 @@ public class TestHRegionFileSystem {
hcdB.setStoragePolicy("ALL_SSD");
admin.modifyColumnFamily(TABLE_NAME, hcdB);
while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
- .hasRegionsInTransition()) {
+ .isRegionsInTransition()) {
Thread.sleep(200);
LOG.debug("Waiting on table to finish schema altering");
}
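The change above is only the older method name on RegionStates (isRegionsInTransition rather than hasRegionsInTransition); the loop itself is the usual wait for a schema alteration to finish rolling out. Pulled into a standalone helper for clarity (the class and method names are illustrative, and a running HBaseTestingUtility is assumed):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.master.AssignmentManager;

public class WaitForAlterSketch {
  // Block until no region is in transition any more, i.e. the alteration
  // triggered by modifyColumnFamily has been applied across the cluster.
  static void waitUntilNoRegionsInTransition(HBaseTestingUtility util)
      throws InterruptedException {
    AssignmentManager am = util.getMiniHBaseCluster().getMaster().getAssignmentManager();
    while (am.getRegionStates().isRegionsInTransition()) {
      Thread.sleep(200); // same poll interval as the test
    }
  }
}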
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
index dcb2c86..c04f2d4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
@@ -56,20 +56,19 @@ import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
+import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterRpcServices;
import org.apache.hadoop.hbase.master.RegionState;
-import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-import org.apache.hadoop.hbase.master.assignment.RegionStates;
-import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+import org.apache.hadoop.hbase.master.RegionState.State;
+import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.apache.hadoop.hbase.util.Pair;
@@ -79,7 +78,6 @@ import org.apache.zookeeper.KeeperException;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
-import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -87,6 +85,8 @@ import org.junit.rules.TestName;
import org.junit.rules.TestRule;
import com.google.common.base.Joiner;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
@Category({RegionServerTests.class, MediumTests.class})
public class TestRegionMergeTransactionOnCluster {
@@ -154,16 +154,24 @@ public class TestRegionMergeTransactionOnCluster {
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
AssignmentManager am = cluster.getMaster().getAssignmentManager();
RegionStates regionStates = am.getRegionStates();
+ long start = EnvironmentEdgeManager.currentTime();
+ while (!regionStates.isRegionInState(hri, State.MERGED)) {
+ assertFalse("Timed out in waiting one merged region to be in state MERGED",
+ EnvironmentEdgeManager.currentTime() - start > 60000);
+ Thread.sleep(500);
+ }
// We should not be able to assign it again
am.assign(hri, true);
assertFalse("Merged region can't be assigned",
regionStates.isRegionInTransition(hri));
+ assertTrue(regionStates.isRegionInState(hri, State.MERGED));
// We should not be able to unassign it either
- am.unassign(hri);
+ am.unassign(hri, null);
assertFalse("Merged region can't be unassigned",
regionStates.isRegionInTransition(hri));
+ assertTrue(regionStates.isRegionInState(hri, State.MERGED));
table.close();
}
@@ -200,7 +208,8 @@ public class TestRegionMergeTransactionOnCluster {
// Create table and load data.
Table table = createTableAndLoadData(MASTER, tableName);
// Merge 1st and 2nd region
- mergeRegionsAndVerifyRegionNum(MASTER, tableName, 0, 1, INITIAL_REGION_NUM - 1);
+ mergeRegionsAndVerifyRegionNum(MASTER, tableName, 0, 1,
+ INITIAL_REGION_NUM - 1);
verifyRowCount(table, ROWSIZE);
table.close();
@@ -278,9 +287,6 @@ public class TestRegionMergeTransactionOnCluster {
cleaned = ADMIN.runCatalogScan();
LOG.debug("catalog janitor returned " + cleaned);
Thread.sleep(50);
- // Cleanup is async so wait till all procedures are done running.
- ProcedureTestingUtility.waitNoProcedureRunning(
- TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor());
}
assertFalse(regionAdir.toString(), fs.exists(regionAdir));
assertFalse(regionBdir.toString(), fs.exists(regionBdir));
@@ -314,13 +320,12 @@ public class TestRegionMergeTransactionOnCluster {
try {
// Create table and load data.
Table table = createTableAndLoadData(MASTER, tableName);
- AssignmentManager am = MASTER.getAssignmentManager();
- List<HRegionInfo> regions = am.getRegionStates().getRegionsOfTable(tableName);
+ RegionStates regionStates = MASTER.getAssignmentManager().getRegionStates();
+ List<HRegionInfo> regions = regionStates.getRegionsOfTable(tableName);
// Fake offline one region
HRegionInfo a = regions.get(0);
HRegionInfo b = regions.get(1);
- am.unassign(b);
- am.offlineRegion(b);
+ regionStates.regionOffline(a);
try {
// Merge offline region. Region a is offline here
admin.mergeRegionsAsync(a.getEncodedNameAsBytes(), b.getEncodedNameAsBytes(), false)
@@ -357,7 +362,7 @@ public class TestRegionMergeTransactionOnCluster {
}
}
- @Ignore @Test // DISABLED FOR NOW. DON'T KNOW HOW IT IS SUPPOSED TO WORK.
+ @Test
public void testMergeWithReplicas() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
// Create table and load data.
@@ -425,16 +430,12 @@ public class TestRegionMergeTransactionOnCluster {
List<HRegionInfo> tableRegionsInMaster;
long timeout = System.currentTimeMillis() + waitTime;
while (System.currentTimeMillis() < timeout) {
- tableRegionsInMeta =
- MetaTableAccessor.getTableRegionsAndLocations(TEST_UTIL.getConnection(), tablename);
- tableRegionsInMaster =
- master.getAssignmentManager().getRegionStates().getRegionsOfTable(tablename);
- LOG.info(tableRegionsInMaster);
- LOG.info(tableRegionsInMeta);
- int tableRegionsInMetaSize = tableRegionsInMeta.size();
- int tableRegionsInMasterSize = tableRegionsInMaster.size();
- if (tableRegionsInMetaSize == expectedRegionNum
- && tableRegionsInMasterSize == expectedRegionNum) {
+ tableRegionsInMeta = MetaTableAccessor.getTableRegionsAndLocations(
+ TEST_UTIL.getConnection(), tablename);
+ tableRegionsInMaster = master.getAssignmentManager().getRegionStates()
+ .getRegionsOfTable(tablename);
+ if (tableRegionsInMeta.size() == expectedRegionNum
+ && tableRegionsInMaster.size() == expectedRegionNum) {
break;
}
Thread.sleep(250);
@@ -470,13 +471,12 @@ public class TestRegionMergeTransactionOnCluster {
verifyRowCount(table, ROWSIZE);
LOG.info("Verified " + table.getName());
- // Sleep here is an ugly hack to allow region transitions to finish
+ // sleep here is an ugly hack to allow region transitions to finish
long timeout = System.currentTimeMillis() + waitTime;
List<Pair<HRegionInfo, ServerName>> tableRegions;
while (System.currentTimeMillis() < timeout) {
tableRegions = MetaTableAccessor.getTableRegionsAndLocations(
TEST_UTIL.getConnection(), tablename);
- LOG.info("Found " + tableRegions.size() + ", expecting " + numRegions * replication);
if (tableRegions.size() == numRegions * replication)
break;
Thread.sleep(250);
@@ -546,7 +546,7 @@ public class TestRegionMergeTransactionOnCluster {
if (enabled.get() && req.getTransition(0).getTransitionCode()
== TransitionCode.READY_TO_MERGE && !resp.hasErrorMessage()) {
RegionStates regionStates = myMaster.getAssignmentManager().getRegionStates();
- for (RegionState regionState: regionStates.getRegionsStateInTransition()) {
+ for (RegionState regionState: regionStates.getRegionsInTransition()) {
// Find the merging_new region and remove it
if (regionState.isMergingNew()) {
regionStates.deleteRegion(regionState.getRegion());
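Among the restored lines above is a bounded polling loop that waits for the parent region to reach State.MERGED before the test tries to assign or unassign it again. Expressed as a standalone helper (illustrative names; the RegionStates and HRegionInfo are assumed to come from the running master, as in the test):

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class WaitForMergedSketch {
  // Poll until the given region is marked MERGED, giving up after 60 seconds,
  // the same bound and interval the restored test loop uses.
  static void waitUntilMerged(RegionStates regionStates, HRegionInfo hri)
      throws InterruptedException {
    long start = EnvironmentEdgeManager.currentTime();
    while (!regionStates.isRegionInState(hri, State.MERGED)) {
      if (EnvironmentEdgeManager.currentTime() - start > 60000) {
        throw new AssertionError("Timed out waiting for the merged region to reach state MERGED");
      }
      Thread.sleep(500);
    }
  }
}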
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
index 3c03827..e6b1bc5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
@@ -17,44 +17,16 @@
*/
package org.apache.hadoop.hbase.regionserver;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CategoryBasedTimeout;
-import org.apache.hadoop.hbase.CompatibilityFactory;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Append;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Increment;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.test.MetricsAssertHelper;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
@@ -70,6 +42,12 @@ import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.junit.rules.TestRule;
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
@Category({RegionServerTests.class, LargeTests.class})
public class TestRegionServerMetrics {
private static final Log LOG = LogFactory.getLog(TestRegionServerMetrics.class);