You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by ch...@apache.org on 2017/09/28 12:30:32 UTC
[02/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoBuilder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoBuilder.java
index 497ecc4..5e553dc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoBuilder.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoBuilder.java
@@ -86,7 +86,7 @@ public class TestRegionInfoBuilder {
RegionInfo ri = RegionInfoBuilder.FIRST_META_REGIONINFO;
byte [] bytes = RegionInfo.toByteArray(ri);
RegionInfo pbri = RegionInfo.parseFrom(bytes);
- assertTrue(ri.equals(pbri));
+ assertTrue(RegionInfo.COMPARATOR.compare(ri, pbri) == 0);
}
@Test
@@ -286,7 +286,7 @@ public class TestRegionInfoBuilder {
.setReplicaId(replicaId).build();
// convert two times, compare
- RegionInfo convertedRi = ProtobufUtil.toRegionInfo(ProtobufUtil.toProtoRegionInfo(ri));
+ RegionInfo convertedRi = ProtobufUtil.toRegionInfo(ProtobufUtil.toRegionInfo(ri));
assertEquals(ri, convertedRi);
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
index b594a2f..15c6b76 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
@@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
@@ -49,6 +48,7 @@ import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
@@ -62,11 +62,6 @@ import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
@@ -87,6 +82,11 @@ import org.junit.rules.TestName;
import org.junit.rules.TestRule;
import org.apache.hadoop.hbase.shaded.com.google.common.base.Joiner;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
@Category({RegionServerTests.class, MediumTests.class})
public class TestRegionMergeTransactionOnCluster {
@@ -142,14 +142,14 @@ public class TestRegionMergeTransactionOnCluster {
INITIAL_REGION_NUM - 1);
// Merge 2nd and 3rd region
- PairOfSameType<HRegionInfo> mergedRegions =
+ PairOfSameType<RegionInfo> mergedRegions =
mergeRegionsAndVerifyRegionNum(MASTER, tableName, 1, 2,
INITIAL_REGION_NUM - 2);
verifyRowCount(table, ROWSIZE);
// Randomly choose one of the two merged regions
- HRegionInfo hri = RandomUtils.nextBoolean() ?
+ RegionInfo hri = RandomUtils.nextBoolean() ?
mergedRegions.getFirst() : mergedRegions.getSecond();
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
AssignmentManager am = cluster.getMaster().getAssignmentManager();
@@ -204,9 +204,9 @@ public class TestRegionMergeTransactionOnCluster {
verifyRowCount(table, ROWSIZE);
table.close();
- List<Pair<HRegionInfo, ServerName>> tableRegions = MetaTableAccessor
+ List<Pair<RegionInfo, ServerName>> tableRegions = MetaTableAccessor
.getTableRegionsAndLocations(MASTER.getConnection(), tableName);
- HRegionInfo mergedRegionInfo = tableRegions.get(0).getFirst();
+ RegionInfo mergedRegionInfo = tableRegions.get(0).getFirst();
TableDescriptor tableDescriptor = MASTER.getTableDescriptors().get(
tableName);
Result mergedRegionResult = MetaTableAccessor.getRegionResult(
@@ -219,9 +219,9 @@ public class TestRegionMergeTransactionOnCluster {
HConstants.MERGEB_QUALIFIER) != null);
// merging regions' directory are in the file system all the same
- PairOfSameType<HRegionInfo> p = MetaTableAccessor.getMergeRegions(mergedRegionResult);
- HRegionInfo regionA = p.getFirst();
- HRegionInfo regionB = p.getSecond();
+ PairOfSameType<RegionInfo> p = MetaTableAccessor.getMergeRegions(mergedRegionResult);
+ RegionInfo regionA = p.getFirst();
+ RegionInfo regionB = p.getSecond();
FileSystem fs = MASTER.getMasterFileSystem().getFileSystem();
Path rootDir = MASTER.getMasterFileSystem().getRootDir();
@@ -315,10 +315,10 @@ public class TestRegionMergeTransactionOnCluster {
// Create table and load data.
Table table = createTableAndLoadData(MASTER, tableName);
AssignmentManager am = MASTER.getAssignmentManager();
- List<HRegionInfo> regions = am.getRegionStates().getRegionsOfTable(tableName);
+ List<RegionInfo> regions = am.getRegionStates().getRegionsOfTable(tableName);
// Fake offline one region
- HRegionInfo a = regions.get(0);
- HRegionInfo b = regions.get(1);
+ RegionInfo a = regions.get(0);
+ RegionInfo b = regions.get(1);
am.unassign(b);
am.offlineRegion(b);
try {
@@ -362,21 +362,21 @@ public class TestRegionMergeTransactionOnCluster {
final TableName tableName = TableName.valueOf(name.getMethodName());
// Create table and load data.
createTableAndLoadData(MASTER, tableName, 5, 2);
- List<Pair<HRegionInfo, ServerName>> initialRegionToServers =
+ List<Pair<RegionInfo, ServerName>> initialRegionToServers =
MetaTableAccessor.getTableRegionsAndLocations(
TEST_UTIL.getConnection(), tableName);
// Merge 1st and 2nd region
- PairOfSameType<HRegionInfo> mergedRegions = mergeRegionsAndVerifyRegionNum(MASTER, tableName,
+ PairOfSameType<RegionInfo> mergedRegions = mergeRegionsAndVerifyRegionNum(MASTER, tableName,
0, 2, 5 * 2 - 2);
- List<Pair<HRegionInfo, ServerName>> currentRegionToServers =
+ List<Pair<RegionInfo, ServerName>> currentRegionToServers =
MetaTableAccessor.getTableRegionsAndLocations(
TEST_UTIL.getConnection(), tableName);
- List<HRegionInfo> initialRegions = new ArrayList<>();
- for (Pair<HRegionInfo, ServerName> p : initialRegionToServers) {
+ List<RegionInfo> initialRegions = new ArrayList<>();
+ for (Pair<RegionInfo, ServerName> p : initialRegionToServers) {
initialRegions.add(p.getFirst());
}
- List<HRegionInfo> currentRegions = new ArrayList<>();
- for (Pair<HRegionInfo, ServerName> p : currentRegionToServers) {
+ List<RegionInfo> currentRegions = new ArrayList<>();
+ for (Pair<RegionInfo, ServerName> p : currentRegionToServers) {
currentRegions.add(p.getFirst());
}
assertTrue(initialRegions.contains(mergedRegions.getFirst())); //this is the first region
@@ -396,23 +396,23 @@ public class TestRegionMergeTransactionOnCluster {
mergedRegions.getSecond(), 1))); //replica of the merged region
}
- private PairOfSameType<HRegionInfo> mergeRegionsAndVerifyRegionNum(
+ private PairOfSameType<RegionInfo> mergeRegionsAndVerifyRegionNum(
HMaster master, TableName tablename,
int regionAnum, int regionBnum, int expectedRegionNum) throws Exception {
- PairOfSameType<HRegionInfo> mergedRegions =
+ PairOfSameType<RegionInfo> mergedRegions =
requestMergeRegion(master, tablename, regionAnum, regionBnum);
waitAndVerifyRegionNum(master, tablename, expectedRegionNum);
return mergedRegions;
}
- private PairOfSameType<HRegionInfo> requestMergeRegion(
+ private PairOfSameType<RegionInfo> requestMergeRegion(
HMaster master, TableName tablename,
int regionAnum, int regionBnum) throws Exception {
- List<Pair<HRegionInfo, ServerName>> tableRegions = MetaTableAccessor
+ List<Pair<RegionInfo, ServerName>> tableRegions = MetaTableAccessor
.getTableRegionsAndLocations(
TEST_UTIL.getConnection(), tablename);
- HRegionInfo regionA = tableRegions.get(regionAnum).getFirst();
- HRegionInfo regionB = tableRegions.get(regionBnum).getFirst();
+ RegionInfo regionA = tableRegions.get(regionAnum).getFirst();
+ RegionInfo regionB = tableRegions.get(regionBnum).getFirst();
ADMIN.mergeRegionsAsync(
regionA.getEncodedNameAsBytes(),
regionB.getEncodedNameAsBytes(), false);
@@ -421,8 +421,8 @@ public class TestRegionMergeTransactionOnCluster {
private void waitAndVerifyRegionNum(HMaster master, TableName tablename,
int expectedRegionNum) throws Exception {
- List<Pair<HRegionInfo, ServerName>> tableRegionsInMeta;
- List<HRegionInfo> tableRegionsInMaster;
+ List<Pair<RegionInfo, ServerName>> tableRegionsInMeta;
+ List<RegionInfo> tableRegionsInMaster;
long timeout = System.currentTimeMillis() + waitTime;
while (System.currentTimeMillis() < timeout) {
tableRegionsInMeta =
@@ -472,7 +472,7 @@ public class TestRegionMergeTransactionOnCluster {
// Sleep here is an ugly hack to allow region transitions to finish
long timeout = System.currentTimeMillis() + waitTime;
- List<Pair<HRegionInfo, ServerName>> tableRegions;
+ List<Pair<RegionInfo, ServerName>> tableRegions;
while (System.currentTimeMillis() < timeout) {
tableRegions = MetaTableAccessor.getTableRegionsAndLocations(
TEST_UTIL.getConnection(), tablename);
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbort.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbort.java
index 1e3db70..6ff9f75 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbort.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbort.java
@@ -18,6 +18,12 @@
package org.apache.hadoop.hbase.regionserver;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -25,13 +31,12 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
@@ -43,12 +48,12 @@ import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
@@ -206,13 +211,13 @@ public class TestRegionServerAbort {
*/
public static class ErrorThrowingHRegion extends HRegion {
public ErrorThrowingHRegion(Path tableDir, WAL wal, FileSystem fs, Configuration confParam,
- HRegionInfo regionInfo, TableDescriptor htd,
+ RegionInfo regionInfo, TableDescriptor htd,
RegionServerServices rsServices) {
super(tableDir, wal, fs, confParam, regionInfo, htd, rsServices);
}
public ErrorThrowingHRegion(HRegionFileSystem fs, WAL wal, Configuration confParam,
- HTableDescriptor htd, RegionServerServices rsServices) {
+ TableDescriptor htd, RegionServerServices rsServices) {
super(fs, wal, confParam, htd, rsServices);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java
index 7c16d32..44ab24e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java
@@ -27,16 +27,18 @@ import static org.mockito.Mockito.mock;
import java.util.HashMap;
import java.util.Map;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;
import org.junit.experimental.categories.Category;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
+
/**
* Test class for isolated (non-cluster) tests surrounding the report
* of Region space use to the Master by RegionServers.
@@ -47,10 +49,20 @@ public class TestRegionServerRegionSpaceUseReport {
@Test
public void testConversion() {
TableName tn = TableName.valueOf("table1");
- HRegionInfo hri1 = new HRegionInfo(tn, Bytes.toBytes("a"), Bytes.toBytes("b"));
- HRegionInfo hri2 = new HRegionInfo(tn, Bytes.toBytes("b"), Bytes.toBytes("c"));
- HRegionInfo hri3 = new HRegionInfo(tn, Bytes.toBytes("c"), Bytes.toBytes("d"));
- Map<HRegionInfo,Long> sizes = new HashMap<>();
+
+ RegionInfo hri1 = RegionInfoBuilder.newBuilder(tn)
+ .setStartKey(Bytes.toBytes("a"))
+ .setEndKey(Bytes.toBytes("b"))
+ .build();
+ RegionInfo hri2 = RegionInfoBuilder.newBuilder(tn)
+ .setStartKey(Bytes.toBytes("b"))
+ .setEndKey(Bytes.toBytes("c"))
+ .build();
+ RegionInfo hri3 = RegionInfoBuilder.newBuilder(tn)
+ .setStartKey(Bytes.toBytes("c"))
+ .setEndKey(Bytes.toBytes("d"))
+ .build();
+ Map<RegionInfo,Long> sizes = new HashMap<>();
sizes.put(hri1, 1024L * 1024L);
sizes.put(hri2, 1024L * 1024L * 8L);
sizes.put(hri3, 1024L * 1024L * 32L);
@@ -58,13 +70,12 @@ public class TestRegionServerRegionSpaceUseReport {
// Call the real method to convert the map into a protobuf
HRegionServer rs = mock(HRegionServer.class);
doCallRealMethod().when(rs).buildRegionSpaceUseReportRequest(any(Map.class));
- doCallRealMethod().when(rs).convertRegionSize(any(HRegionInfo.class), anyLong());
+ doCallRealMethod().when(rs).convertRegionSize(any(RegionInfo.class), anyLong());
RegionSpaceUseReportRequest requests = rs.buildRegionSpaceUseReportRequest(sizes);
assertEquals(sizes.size(), requests.getSpaceUseCount());
for (RegionSpaceUse spaceUse : requests.getSpaceUseList()) {
- RegionInfo ri = spaceUse.getRegionInfo();
- HRegionInfo hri = HRegionInfo.convert(ri);
+ RegionInfo hri = ProtobufUtil.toRegionInfo(spaceUse.getRegionInfo());
Long expectedSize = sizes.remove(hri);
assertNotNull("Could not find size for HRI: " + hri, expectedSize);
assertEquals(expectedSize.longValue(), spaceUse.getRegionSize());
@@ -77,7 +88,7 @@ public class TestRegionServerRegionSpaceUseReport {
// Call the real method to convert the map into a protobuf
HRegionServer rs = mock(HRegionServer.class);
doCallRealMethod().when(rs).buildRegionSpaceUseReportRequest(any(Map.class));
- doCallRealMethod().when(rs).convertRegionSize(any(HRegionInfo.class), anyLong());
+ doCallRealMethod().when(rs).convertRegionSize(any(RegionInfo.class), anyLong());
rs.buildRegionSpaceUseReportRequest(null);
}
@@ -85,14 +96,17 @@ public class TestRegionServerRegionSpaceUseReport {
@Test(expected = NullPointerException.class)
public void testMalformedMap() {
TableName tn = TableName.valueOf("table1");
- HRegionInfo hri1 = new HRegionInfo(tn, Bytes.toBytes("a"), Bytes.toBytes("b"));
- Map<HRegionInfo,Long> sizes = new HashMap<>();
+ RegionInfo hri1 = RegionInfoBuilder.newBuilder(tn)
+ .setStartKey(Bytes.toBytes("a"))
+ .setEndKey(Bytes.toBytes("b"))
+ .build();
+ Map<RegionInfo,Long> sizes = new HashMap<>();
sizes.put(hri1, null);
// Call the real method to convert the map into a protobuf
HRegionServer rs = mock(HRegionServer.class);
doCallRealMethod().when(rs).buildRegionSpaceUseReportRequest(any(Map.class));
- doCallRealMethod().when(rs).convertRegionSize(any(HRegionInfo.class), anyLong());
+ doCallRealMethod().when(rs).convertRegionSize(any(RegionInfo.class), anyLong());
rs.buildRegionSpaceUseReportRequest(sizes);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java
index 00e6231..7907e13 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.java
@@ -21,9 +21,6 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
@@ -40,12 +37,12 @@ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HTestConst;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
@@ -54,8 +51,6 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterBase;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
@@ -69,6 +64,11 @@ import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
+
/**
* Here we test to make sure that scans return the expected Results when the server is sending the
* Client heartbeat messages. Heartbeat messages are essentially keep-alive messages (they prevent
@@ -446,12 +446,12 @@ public class TestScannerHeartbeatMessages {
private static volatile boolean sleepBetweenColumnFamilies = false;
public HeartbeatHRegion(Path tableDir, WAL wal, FileSystem fs, Configuration confParam,
- HRegionInfo regionInfo, TableDescriptor htd, RegionServerServices rsServices) {
+ RegionInfo regionInfo, TableDescriptor htd, RegionServerServices rsServices) {
super(tableDir, wal, fs, confParam, regionInfo, htd, rsServices);
}
public HeartbeatHRegion(HRegionFileSystem fs, WAL wal, Configuration confParam,
- HTableDescriptor htd, RegionServerServices rsServices) {
+ TableDescriptor htd, RegionServerServices rsServices) {
super(fs, wal, confParam, htd, rsServices);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index 57f50b6..1a69be3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -62,6 +61,7 @@ import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
@@ -154,10 +154,10 @@ public class TestSplitTransactionOnCluster {
}
}
- private HRegionInfo getAndCheckSingleTableRegion(final List<HRegion> regions)
+ private RegionInfo getAndCheckSingleTableRegion(final List<HRegion> regions)
throws IOException, InterruptedException {
assertEquals(1, regions.size());
- HRegionInfo hri = regions.get(0).getRegionInfo();
+ RegionInfo hri = regions.get(0).getRegionInfo();
try {
cluster.getMaster().getAssignmentManager().waitForAssignment(hri, 600000);
} catch (NoSuchProcedureException e) {
@@ -184,7 +184,7 @@ public class TestSplitTransactionOnCluster {
// Create table then get the single region for our new table.
Table t = createTableAndWait(tableName, Bytes.toBytes("cf"));
final List<HRegion> regions = cluster.getRegions(tableName);
- final HRegionInfo hri = getAndCheckSingleTableRegion(regions);
+ final RegionInfo hri = getAndCheckSingleTableRegion(regions);
insertData(tableName, admin, t);
t.close();
@@ -299,7 +299,7 @@ public class TestSplitTransactionOnCluster {
// Create table then get the single region for our new table.
Table t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
List<HRegion> regions = cluster.getRegions(tableName);
- HRegionInfo hri = getAndCheckSingleTableRegion(regions);
+ RegionInfo hri = getAndCheckSingleTableRegion(regions);
int tableRegionIndex = ensureTableRegionNotOnSameServerAsMeta(admin, hri);
@@ -355,7 +355,7 @@ public class TestSplitTransactionOnCluster {
// Create table then get the single region for our new table.
Table t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
List<HRegion> regions = cluster.getRegions(tableName);
- HRegionInfo hri = getAndCheckSingleTableRegion(regions);
+ RegionInfo hri = getAndCheckSingleTableRegion(regions);
int tableRegionIndex = ensureTableRegionNotOnSameServerAsMeta(admin, hri);
@@ -376,7 +376,7 @@ public class TestSplitTransactionOnCluster {
List<HRegion> daughters = checkAndGetDaughters(tableName);
// Now split one of the daughters.
regionCount = ProtobufUtil.getOnlineRegions(server.getRSRpcServices()).size();
- HRegionInfo daughter = daughters.get(0).getRegionInfo();
+ RegionInfo daughter = daughters.get(0).getRegionInfo();
LOG.info("Daughter we are going to split: " + daughter);
// Compact first to ensure we have cleaned up references -- else the split
// will fail.
@@ -384,7 +384,7 @@ public class TestSplitTransactionOnCluster {
daughters = cluster.getRegions(tableName);
HRegion daughterRegion = null;
for (HRegion r: daughters) {
- if (r.getRegionInfo().equals(daughter)) {
+ if (RegionInfo.COMPARATOR.compare(r.getRegionInfo(), daughter) == 0) {
daughterRegion = r;
LOG.info("Found matching HRI: " + daughterRegion);
break;
@@ -451,11 +451,11 @@ public class TestSplitTransactionOnCluster {
admin.flush(userTableName);
}
admin.majorCompact(userTableName);
- List<HRegionInfo> regionsOfTable =
+ List<RegionInfo> regionsOfTable =
cluster.getMaster().getAssignmentManager().getRegionStates()
.getRegionsOfTable(userTableName);
assertEquals(1, regionsOfTable.size());
- HRegionInfo hRegionInfo = regionsOfTable.get(0);
+ RegionInfo hRegionInfo = regionsOfTable.get(0);
Put p = new Put("row6".getBytes());
p.addColumn("col".getBytes(), "ql".getBytes(), "val".getBytes());
table.put(p);
@@ -511,7 +511,7 @@ public class TestSplitTransactionOnCluster {
// Create table then get the single region for our new table.
Table t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
List<HRegion> regions = cluster.getRegions(tableName);
- HRegionInfo hri = getAndCheckSingleTableRegion(regions);
+ RegionInfo hri = getAndCheckSingleTableRegion(regions);
int tableRegionIndex = ensureTableRegionNotOnSameServerAsMeta(admin, hri);
@@ -660,7 +660,7 @@ public class TestSplitTransactionOnCluster {
// Create table then get the single region for our new table.
createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
List<HRegion> regions = cluster.getRegions(tableName);
- HRegionInfo hri = getAndCheckSingleTableRegion(regions);
+ RegionInfo hri = getAndCheckSingleTableRegion(regions);
ensureTableRegionNotOnSameServerAsMeta(admin, hri);
int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionInfo()
.getRegionName());
@@ -809,7 +809,7 @@ public class TestSplitTransactionOnCluster {
return master;
}
- private void split(final HRegionInfo hri, final HRegionServer server, final int regionCount)
+ private void split(final RegionInfo hri, final HRegionServer server, final int regionCount)
throws IOException, InterruptedException {
this.admin.splitRegion(hri.getRegionName());
for (int i = 0; this.cluster.getRegions(hri.getTable()).size() <= regionCount && i < 60; i++) {
@@ -833,7 +833,7 @@ public class TestSplitTransactionOnCluster {
* @throws InterruptedException
*/
private int ensureTableRegionNotOnSameServerAsMeta(final Admin admin,
- final HRegionInfo hri)
+ final RegionInfo hri)
throws IOException, MasterNotRunningException,
ZooKeeperConnectionException, InterruptedException {
// Now make sure that the table region is not on same server as that hosting
@@ -901,8 +901,8 @@ public class TestSplitTransactionOnCluster {
private void printOutRegions(final HRegionServer hrs, final String prefix)
throws IOException {
- List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
- for (HRegionInfo region: regions) {
+ List<RegionInfo> regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
+ for (RegionInfo region: regions) {
LOG.info(prefix + region.getRegionNameAsString());
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
index bc66c33..d5e8c1c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
@@ -18,10 +18,10 @@
package org.apache.hadoop.hbase.regionserver.wal;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
-import static org.junit.Assert.assertNotEquals;
import java.io.IOException;
import java.util.ArrayList;
@@ -45,12 +45,13 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.SampleRegionWALCoprocessor;
@@ -150,7 +151,7 @@ public abstract class AbstractTestFSWAL {
}
}
- protected void addEdits(WAL log, HRegionInfo hri, HTableDescriptor htd, int times,
+ protected void addEdits(WAL log, RegionInfo hri, HTableDescriptor htd, int times,
MultiVersionConcurrencyControl mvcc, NavigableMap<byte[], Integer> scopes)
throws IOException {
final byte[] row = Bytes.toBytes("row");
@@ -250,10 +251,14 @@ public abstract class AbstractTestFSWAL {
new HTableDescriptor(TableName.valueOf("t1")).addFamily(new HColumnDescriptor("row"));
HTableDescriptor t2 =
new HTableDescriptor(TableName.valueOf("t2")).addFamily(new HColumnDescriptor("row"));
- HRegionInfo hri1 =
- new HRegionInfo(t1.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
- HRegionInfo hri2 =
- new HRegionInfo(t2.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
+ RegionInfo hri1 = RegionInfoBuilder.newBuilder(t1.getTableName())
+ .setStartKey(HConstants.EMPTY_START_ROW)
+ .setEndKey(HConstants.EMPTY_END_ROW)
+ .build();
+ RegionInfo hri2 = RegionInfoBuilder.newBuilder(t2.getTableName())
+ .setStartKey(HConstants.EMPTY_START_ROW)
+ .setEndKey(HConstants.EMPTY_END_ROW)
+ .build();
// add edits and roll the wal
MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
NavigableMap<byte[], Integer> scopes1 = new TreeMap<>(Bytes.BYTES_COMPARATOR);
@@ -354,7 +359,7 @@ public abstract class AbstractTestFSWAL {
public void testFlushSequenceIdIsGreaterThanAllEditsInHFile() throws IOException {
String testName = currentTest.getMethodName();
final TableName tableName = TableName.valueOf(testName);
- final HRegionInfo hri = new HRegionInfo(tableName);
+ final RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).build();
final byte[] rowName = tableName.getName();
final HTableDescriptor htd = new HTableDescriptor(tableName);
htd.addFamily(new HColumnDescriptor("f"));
@@ -408,7 +413,7 @@ public abstract class AbstractTestFSWAL {
// Now make appends run slow.
goslow.set(true);
for (int i = 0; i < countPerFamily; i++) {
- final HRegionInfo info = region.getRegionInfo();
+ final RegionInfo info = region.getRegionInfo();
final WALKey logkey = new WALKey(info.getEncodedNameAsBytes(), tableName,
System.currentTimeMillis(), clusterIds, -1, -1, region.getMVCC(), scopes);
wal.append(info, logkey, edits, true);
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java
index 4c3d2a5..8ff1823 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
@@ -38,6 +37,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -189,8 +189,7 @@ public abstract class AbstractTestLogRolling {
this.tableName = getName();
// TODO: Why does this write data take for ever?
startAndWriteData();
- HRegionInfo region = server.getRegions(TableName.valueOf(tableName)).get(0)
- .getRegionInfo();
+ RegionInfo region = server.getRegions(TableName.valueOf(tableName)).get(0).getRegionInfo();
final WAL log = server.getWAL(region);
LOG.info("after writing there are " + AbstractFSWALProvider.getNumRolledLogFiles(log) + " log files");
assertLogFileSize(log);
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java
index bba08a7..73f70a7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java
@@ -21,9 +21,9 @@ import static org.junit.Assert.assertEquals;
import java.io.IOException;
-import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests;
@@ -58,7 +58,7 @@ public class TestAsyncLogRolling extends AbstractTestLogRolling {
TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
doPut(table, 1);
server = TEST_UTIL.getRSForFirstRegionInTable(table.getName());
- HRegionInfo hri = server.getRegions(table.getName()).get(0).getRegionInfo();
+ RegionInfo hri = server.getRegions(table.getName()).get(0).getRegionInfo();
AsyncFSWAL wal = (AsyncFSWAL) server.getWAL(hri);
int numRolledLogFiles = AsyncFSWALProvider.getNumRolledLogFiles(wal);
DatanodeInfo[] dnInfos = wal.getPipeline();
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
index de6fdb3..295ac2f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
@@ -38,10 +38,10 @@ import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
@@ -140,7 +140,7 @@ public class TestLogRolling extends AbstractTestLogRolling {
Table table = TEST_UTIL.getConnection().getTable(desc.getTableName());
server = TEST_UTIL.getRSForFirstRegionInTable(desc.getTableName());
- HRegionInfo region = server.getRegions(desc.getTableName()).get(0).getRegionInfo();
+ RegionInfo region = server.getRegions(desc.getTableName()).get(0).getRegionInfo();
final FSHLog log = (FSHLog) server.getWAL(region);
final AtomicBoolean lowReplicationHookCalled = new AtomicBoolean(false);
@@ -248,7 +248,7 @@ public class TestLogRolling extends AbstractTestLogRolling {
Table table = TEST_UTIL.getConnection().getTable(desc.getTableName());
server = TEST_UTIL.getRSForFirstRegionInTable(desc.getTableName());
- HRegionInfo region = server.getRegions(desc.getTableName()).get(0).getRegionInfo();
+ RegionInfo region = server.getRegions(desc.getTableName()).get(0).getRegionInfo();
final WAL log = server.getWAL(region);
final List<Path> paths = new ArrayList<>(1);
final List<Integer> preLogRolledCalled = new ArrayList<>();
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java
index a0562bf..6396228 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java
@@ -18,9 +18,12 @@
package org.apache.hadoop.hbase.replication;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -30,14 +33,19 @@ import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.replication.regionserver.*;
+import org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationGlobalSourceSource;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSourceImpl;
+import org.apache.hadoop.hbase.replication.regionserver.MetricsSource;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
@@ -53,10 +61,6 @@ import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
/**
* Tests ReplicationSource and ReplicationEndpoint interactions
*/
@@ -202,7 +206,7 @@ public class TestReplicationEndpoint extends TestReplicationBase {
// Make sure edits are spread across regions because we do region based batching
// before shipping edits.
for(HRegion region: regions) {
- HRegionInfo hri = region.getRegionInfo();
+ RegionInfo hri = region.getRegionInfo();
byte[] row = hri.getStartKey();
for (int i = 0; i < 100; i++) {
if (row.length > 0) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
index b5b24d9..7a8e639 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
@@ -19,6 +19,8 @@
*/
package org.apache.hadoop.hbase.replication;
+import static org.junit.Assert.assertEquals;
+
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
@@ -32,13 +34,13 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HTestConst;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
@@ -60,8 +62,6 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
-import static org.junit.Assert.assertEquals;
-
@Category({ ReplicationTests.class, LargeTests.class })
public class TestSerialReplication {
private static final Log LOG = LogFactory.getLog(TestSerialReplication.class);
@@ -271,7 +271,7 @@ public class TestSerialReplication {
put.addColumn(famName, VALUE, VALUE);
t1.put(put);
}
- List<Pair<HRegionInfo, ServerName>> regions =
+ List<Pair<RegionInfo, ServerName>> regions =
MetaTableAccessor.getTableRegionsAndLocations(utility1.getConnection(), tableName);
utility1.getAdmin().mergeRegionsAsync(regions.get(0).getFirst().getRegionName(),
regions.get(1).getFirst().getRegionName(), true);
@@ -336,10 +336,10 @@ public class TestSerialReplication {
}
private void moveRegion(Table table, int index) throws IOException {
- List<Pair<HRegionInfo, ServerName>> regions =
+ List<Pair<RegionInfo, ServerName>> regions =
MetaTableAccessor.getTableRegionsAndLocations(utility1.getConnection(), table.getName());
assertEquals(1, regions.size());
- HRegionInfo regionInfo = regions.get(0).getFirst();
+ RegionInfo regionInfo = regions.get(0).getFirst();
ServerName name = utility1.getHBaseCluster().getRegionServer(index).getServerName();
utility1.getAdmin()
.move(regionInfo.getEncodedNameAsBytes(), Bytes.toBytes(name.getServerName()));
@@ -354,12 +354,12 @@ public class TestSerialReplication {
}
private void balanceTwoRegions(Table table) throws Exception {
- List<Pair<HRegionInfo, ServerName>> regions =
+ List<Pair<RegionInfo, ServerName>> regions =
MetaTableAccessor.getTableRegionsAndLocations(utility1.getConnection(), table.getName());
assertEquals(2, regions.size());
- HRegionInfo regionInfo1 = regions.get(0).getFirst();
+ RegionInfo regionInfo1 = regions.get(0).getFirst();
ServerName name1 = utility1.getHBaseCluster().getRegionServer(0).getServerName();
- HRegionInfo regionInfo2 = regions.get(1).getFirst();
+ RegionInfo regionInfo2 = regions.get(1).getFirst();
ServerName name2 = utility1.getHBaseCluster().getRegionServer(1).getServerName();
utility1.getAdmin()
.move(regionInfo1.getEncodedNameAsBytes(), Bytes.toBytes(name1.getServerName()));
@@ -377,7 +377,7 @@ public class TestSerialReplication {
private void waitTableHasRightNumberOfRegions(TableName tableName, int num) throws IOException {
while (true) {
- List<Pair<HRegionInfo, ServerName>> regions =
+ List<Pair<RegionInfo, ServerName>> regions =
MetaTableAccessor.getTableRegionsAndLocations(utility1.getConnection(), tableName);
if (regions.size() == num) {
return;
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index 3f0b5b4..c0cb9e7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -22,8 +22,8 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
-import java.util.Collections;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@@ -33,23 +33,23 @@ import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
-import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;
@@ -64,18 +64,20 @@ import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.apache.hadoop.hbase.util.FSVisitor;
import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.FSVisitor;
import org.apache.hadoop.hbase.util.MD5Hash;
+import org.apache.yetus.audience.InterfaceAudience;
import org.junit.Assert;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
+
import com.google.protobuf.ServiceException;
/**
@@ -221,7 +223,7 @@ public final class SnapshotTestingUtils {
SnapshotReferenceUtil.visitRegionStoreFiles(regionManifest,
new SnapshotReferenceUtil.StoreFileVisitor() {
@Override
- public void storeFile(final HRegionInfo regionInfo, final String family,
+ public void storeFile(final RegionInfo regionInfo, final String family,
final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
snapshotFamilies.add(Bytes.toBytes(family));
}
@@ -243,7 +245,7 @@ public final class SnapshotTestingUtils {
}
// check the region snapshot for all the regions
- List<HRegionInfo> regions = admin.getTableRegions(tableName);
+ List<RegionInfo> regions = admin.getRegions(tableName);
// remove the non-default regions
RegionReplicaUtil.removeNonDefaultRegions(regions);
boolean hasMob = regionManifests.containsKey(MobUtils.getMobRegionInfo(tableName)
@@ -255,7 +257,7 @@ public final class SnapshotTestingUtils {
// region manifest. we should exclude the parent regions.
int regionCountExclusiveSplitParent = 0;
for (SnapshotRegionManifest snapshotRegionManifest : regionManifests.values()) {
- HRegionInfo hri = HRegionInfo.convert(snapshotRegionManifest.getRegionInfo());
+ RegionInfo hri = ProtobufUtil.toRegionInfo(snapshotRegionManifest.getRegionInfo());
if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent())) {
continue;
}
@@ -265,7 +267,7 @@ public final class SnapshotTestingUtils {
}
// Verify Regions (redundant check, see MasterSnapshotVerifier)
- for (HRegionInfo info : regions) {
+ for (RegionInfo info : regions) {
String regionName = info.getEncodedName();
assertTrue("Missing region name: '" + regionName + "'", regionManifests.containsKey(regionName));
}
@@ -449,7 +451,7 @@ public final class SnapshotTestingUtils {
SnapshotReferenceUtil.visitTableStoreFiles(conf, fs, snapshotDir, snapshotDesc,
new SnapshotReferenceUtil.StoreFileVisitor() {
@Override
- public void storeFile(final HRegionInfo regionInfo, final String family,
+ public void storeFile(final RegionInfo regionInfo, final String family,
final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
String region = regionInfo.getEncodedName();
String hfile = storeFile.getName();
@@ -477,11 +479,11 @@ public final class SnapshotTestingUtils {
private final Path rootDir;
static class RegionData {
- public HRegionInfo hri;
+ public RegionInfo hri;
public Path tableDir;
public Path[] files;
- public RegionData(final Path tableDir, final HRegionInfo hri, final int nfiles) {
+ public RegionData(final Path tableDir, final RegionInfo hri, final int nfiles) {
this.tableDir = tableDir;
this.hri = hri;
this.files = new Path[nfiles];
@@ -711,7 +713,10 @@ public final class SnapshotTestingUtils {
byte[] endKey = Bytes.toBytes(1 + i * 2);
// First region, simple with one plain hfile.
- HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKey, endKey);
+ RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName())
+ .setStartKey(startKey)
+ .setEndKey(endKey)
+ .build();
HRegionFileSystem rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, hri);
regions[i] = new RegionData(tableDir, hri, 3);
for (int j = 0; j < regions[i].files.length; ++j) {
@@ -723,7 +728,7 @@ public final class SnapshotTestingUtils {
// This region contains a reference to the hfile in the first region.
startKey = Bytes.toBytes(2 + i * 2);
endKey = Bytes.toBytes(3 + i * 2);
- hri = new HRegionInfo(htd.getTableName());
+ hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, hri);
regions[i+1] = new RegionData(tableDir, hri, regions[i].files.length);
for (int j = 0; j < regions[i].files.length; ++j) {
@@ -883,12 +888,12 @@ public final class SnapshotTestingUtils {
public static void verifyReplicasCameOnline(TableName tableName, Admin admin,
int regionReplication) throws IOException {
- List<HRegionInfo> regions = admin.getTableRegions(tableName);
- HashSet<HRegionInfo> set = new HashSet<>();
- for (HRegionInfo hri : regions) {
+ List<RegionInfo> regions = admin.getRegions(tableName);
+ HashSet<RegionInfo> set = new HashSet<>();
+ for (RegionInfo hri : regions) {
set.add(RegionReplicaUtil.getRegionInfoForDefaultReplica(hri));
for (int i = 0; i < regionReplication; i++) {
- HRegionInfo replica = RegionReplicaUtil.getRegionInfoForReplica(hri, i);
+ RegionInfo replica = RegionReplicaUtil.getRegionInfoForReplica(hri, i);
if (!regions.contains(replica)) {
Assert.fail(replica + " is not contained in the list of online regions");
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java
index 414a6cb..86e5eb3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java
@@ -40,7 +40,6 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
@@ -52,6 +51,8 @@ import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
@@ -63,12 +64,6 @@ import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Multimap;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
@@ -82,6 +77,13 @@ import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.mockito.Mockito;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Multimap;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
+
/**
* Test cases for the atomic load error handling of the bulk load functionality.
*/
@@ -201,7 +203,7 @@ public class TestLoadIncrementalHFilesSplitRecovery {
// need to call regions server to by synchronous but isn't visible.
HRegionServer hrs = util.getRSForFirstRegionInTable(table);
- for (HRegionInfo hri : ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices())) {
+ for (RegionInfo hri : ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices())) {
if (hri.getTable().equals(table)) {
util.getAdmin().splitRegionAsync(hri.getRegionName(), rowkey(ROWCOUNT / 2));
// ProtobufUtil.split(null, hrs.getRSRpcServices(), hri, rowkey(ROWCOUNT / 2));
@@ -212,7 +214,7 @@ public class TestLoadIncrementalHFilesSplitRecovery {
int regions;
do {
regions = 0;
- for (HRegionInfo hri : ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices())) {
+ for (RegionInfo hri : ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices())) {
if (hri.getTable().equals(table)) {
regions++;
}
@@ -359,7 +361,7 @@ public class TestLoadIncrementalHFilesSplitRecovery {
Mockito.when(c.getConfiguration()).thenReturn(conf);
Mockito.doNothing().when(c).close();
// Make it so we return a particular location when asked.
- final HRegionLocation loc = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO,
+ final HRegionLocation loc = new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO,
ServerName.valueOf("example.org", 1234, 0));
Mockito.when(
c.getRegionLocation((TableName) Mockito.any(), (byte[]) Mockito.any(), Mockito.anyBoolean()))
@@ -578,8 +580,8 @@ public class TestLoadIncrementalHFilesSplitRecovery {
dir = buildBulkFiles(tableName, 3);
// Mess it up by leaving a hole in the hbase:meta
- List<HRegionInfo> regionInfos = MetaTableAccessor.getTableRegions(connection, tableName);
- for (HRegionInfo regionInfo : regionInfos) {
+ List<RegionInfo> regionInfos = MetaTableAccessor.getTableRegions(connection, tableName);
+ for (RegionInfo regionInfo : regionInfos) {
if (Bytes.equals(regionInfo.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
MetaTableAccessor.deleteRegion(connection, regionInfo);
break;
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
index d10e6e7..e499ff0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
@@ -18,6 +18,9 @@
*/
package org.apache.hadoop.hbase.util;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
@@ -42,7 +45,6 @@ import org.apache.hadoop.hbase.ClusterStatus.Option;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -55,6 +57,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
@@ -68,8 +71,6 @@ import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.mob.MobFileName;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
import org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo;
import org.apache.hadoop.hbase.util.HBaseFsck.TableInfo;
@@ -77,8 +78,8 @@ import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
import org.apache.zookeeper.KeeperException;
import org.junit.rules.TestName;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
/**
* This is the base class for HBaseFsck's ability to detect reasons for inconsistent tables.
@@ -117,11 +118,14 @@ public class BaseTestHBaseFsck {
/**
* Create a new region in META.
*/
- protected HRegionInfo createRegion(final HTableDescriptor
+ protected RegionInfo createRegion(final HTableDescriptor
htd, byte[] startKey, byte[] endKey)
throws IOException {
Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService);
- HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKey, endKey);
+ RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName())
+ .setStartKey(startKey)
+ .setEndKey(endKey)
+ .build();
MetaTableAccessor.addRegionToMeta(meta, hri);
meta.close();
return hri;
@@ -142,7 +146,7 @@ public class BaseTestHBaseFsck {
* remove its state from the Master.
*/
protected void undeployRegion(Connection conn, ServerName sn,
- HRegionInfo hri) throws IOException, InterruptedException {
+ RegionInfo hri) throws IOException, InterruptedException {
try {
HBaseFsckRepair.closeRegionSilentlyAndWait(conn, sn, hri);
if (!hri.isMetaTable()) {
@@ -163,7 +167,7 @@ public class BaseTestHBaseFsck {
byte[] startKey, byte[] endKey, boolean unassign, boolean metaRow,
boolean hdfs) throws IOException, InterruptedException {
deleteRegion(conf, htd, startKey, endKey, unassign, metaRow, hdfs, false,
- HRegionInfo.DEFAULT_REPLICA_ID);
+ RegionInfo.DEFAULT_REPLICA_ID);
}
/**
@@ -187,7 +191,7 @@ public class BaseTestHBaseFsck {
}
for (HRegionLocation location : locations) {
- HRegionInfo hri = location.getRegionInfo();
+ RegionInfo hri = location.getRegionInfo();
ServerName hsa = location.getServerName();
if (Bytes.compareTo(hri.getStartKey(), startKey) == 0
&& Bytes.compareTo(hri.getEndKey(), endKey) == 0
@@ -340,9 +344,9 @@ public class BaseTestHBaseFsck {
AdminProtos.AdminService.BlockingInterface server = connection.getAdmin(hsi);
// list all online regions from this region server
- List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
+ List<RegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
List<String> regionNames = new ArrayList<>(regions.size());
- for (HRegionInfo hri : regions) {
+ for (RegionInfo hri : regions) {
regionNames.add(hri.getRegionNameAsString());
}
mm.put(hsi, regionNames);
@@ -353,7 +357,7 @@ public class BaseTestHBaseFsck {
/**
* Returns the HSI a region info is on.
*/
- ServerName findDeployedHSI(Map<ServerName, List<String>> mm, HRegionInfo hri) {
+ ServerName findDeployedHSI(Map<ServerName, List<String>> mm, RegionInfo hri) {
for (Map.Entry<ServerName,List <String>> e : mm.entrySet()) {
if (e.getValue().contains(hri.getRegionNameAsString())) {
return e.getKey();
@@ -562,7 +566,7 @@ public class BaseTestHBaseFsck {
HRegionLocation metaLocation = connection.getRegionLocator(TableName.META_TABLE_NAME)
.getRegionLocation(HConstants.EMPTY_START_ROW);
ServerName hsa = metaLocation.getServerName();
- HRegionInfo hri = metaLocation.getRegionInfo();
+ RegionInfo hri = metaLocation.getRegionInfo();
if (unassign) {
LOG.info("Undeploying meta region " + hri + " from server " + hsa);
try (Connection unmanagedConnection = ConnectionFactory.createConnection(conf)) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
index e8148d7..7463da1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
@@ -50,7 +50,6 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -58,14 +57,19 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.io.hfile.TestHFile;
@@ -303,10 +307,10 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
// Mess it up by creating an overlap in the metadata
admin.disableTable(tableName);
deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("B"), true,
- true, false, true, HRegionInfo.DEFAULT_REPLICA_ID);
+ true, false, true, RegionInfo.DEFAULT_REPLICA_ID);
admin.enableTable(tableName);
- HRegionInfo hriOverlap =
+ RegionInfo hriOverlap =
createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A2"), Bytes.toBytes("B"));
TEST_UTIL.assignRegion(hriOverlap);
@@ -346,7 +350,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
assertEquals(ROWKEYS.length, countRows());
// Mess it up by creating an overlap in the metadata
- HRegionInfo hriOverlap =
+ RegionInfo hriOverlap =
createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A2"), Bytes.toBytes("B2"));
TEST_UTIL.assignRegion(hriOverlap);
@@ -421,13 +425,14 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
byte[][] SPLIT_KEYS = new byte[][] { new byte[0], Bytes.toBytes("aaa"),
Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), Bytes.toBytes("ddd") };
- HTableDescriptor htdDisabled = new HTableDescriptor(tableName);
- htdDisabled.addFamily(new HColumnDescriptor(FAM));
+ TableDescriptor htdDisabled = TableDescriptorBuilder.newBuilder(tableName)
+ .addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAM))
+ .build();
// Write the .tableinfo
FSTableDescriptors fstd = new FSTableDescriptors(conf);
fstd.createTableDescriptor(htdDisabled);
- List<HRegionInfo> disabledRegions =
+ List<RegionInfo> disabledRegions =
TEST_UTIL.createMultiRegionsInMeta(conf, htdDisabled, SPLIT_KEYS);
// Let's just assign everything to first RS
@@ -439,7 +444,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
// Disable the table and close its regions
admin.disableTable(tableName);
- HRegionInfo region = disabledRegions.remove(0);
+ RegionInfo region = disabledRegions.remove(0);
byte[] regionName = region.getRegionName();
// The region should not be assigned currently
@@ -622,8 +627,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
try(RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
// make sure data in regions, if in wal only there is no data loss
admin.flush(tableName);
- HRegionInfo region1 = rl.getRegionLocation(Bytes.toBytes("A")).getRegionInfo();
- HRegionInfo region2 = rl.getRegionLocation(Bytes.toBytes("B")).getRegionInfo();
+ RegionInfo region1 = rl.getRegionLocation(Bytes.toBytes("A")).getRegionInfo();
+ RegionInfo region2 = rl.getRegionLocation(Bytes.toBytes("B")).getRegionInfo();
int regionCountBeforeMerge = rl.getAllRegionLocations().size();
@@ -818,15 +823,18 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
// Create a new meta entry to fake it as a split parent.
meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService);
- HRegionInfo hri = location.getRegionInfo();
-
- HRegionInfo a = new HRegionInfo(tbl.getName(),
- Bytes.toBytes("B"), Bytes.toBytes("BM"));
- HRegionInfo b = new HRegionInfo(tbl.getName(),
- Bytes.toBytes("BM"), Bytes.toBytes("C"));
-
- hri.setOffline(true);
- hri.setSplit(true);
+ RegionInfo a = RegionInfoBuilder.newBuilder(tbl.getName())
+ .setStartKey(Bytes.toBytes("B"))
+ .setEndKey(Bytes.toBytes("BM"))
+ .build();
+ RegionInfo b = RegionInfoBuilder.newBuilder(tbl.getName())
+ .setStartKey(Bytes.toBytes("BM"))
+ .setEndKey(Bytes.toBytes("C"))
+ .build();
+ RegionInfo hri = RegionInfoBuilder.newBuilder(location.getRegion())
+ .setOffline(true)
+ .setSplit(true)
+ .build();
MetaTableAccessor.addRegionToMeta(meta, hri, a, b);
meta.close();
@@ -897,7 +905,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
HRegionLocation location = rl.getRegionLocation(Bytes.toBytes("B"));
meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService);
- HRegionInfo hri = location.getRegionInfo();
+ RegionInfo hri = location.getRegionInfo();
// do a regular split
byte[] regionName = location.getRegionInfo().getRegionName();
@@ -916,7 +924,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
Get get = new Get(hri.getRegionName());
Result result = meta.get(get);
assertNotNull(result);
- assertNotNull(MetaTableAccessor.getHRegionInfo(result));
+ assertNotNull(MetaTableAccessor.getRegionInfo(result));
assertEquals(ROWKEYS.length, countRows());
@@ -949,7 +957,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
try(RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
HRegionLocation location = rl.getRegionLocation(Bytes.toBytes("B"));
- HRegionInfo hri = location.getRegionInfo();
+ RegionInfo hri = location.getRegionInfo();
// Disable CatalogJanitor to prevent it from cleaning up the parent region
// after split.
@@ -960,7 +968,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
admin.splitRegion(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
TestEndToEndSplitTransaction.blockUntilRegionSplit(conf, 60000, regionName, true);
- PairOfSameType<HRegionInfo> daughters = MetaTableAccessor.getDaughterRegions(
+ PairOfSameType<RegionInfo> daughters = MetaTableAccessor.getDaughterRegions(
meta.get(new Get(regionName)));
// Delete daughter regions from meta, but not hdfs, unassign it.
@@ -1002,7 +1010,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
Get get = new Get(hri.getRegionName());
Result result = meta.get(get);
assertNotNull(result);
- assertNotNull(MetaTableAccessor.getHRegionInfo(result));
+ assertNotNull(MetaTableAccessor.getRegionInfo(result));
assertEquals(ROWKEYS.length, countRows());
@@ -1116,7 +1124,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
// Mess it up by closing a region
deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("B"), true,
- false, false, false, HRegionInfo.DEFAULT_REPLICA_ID);
+ false, false, false, RegionInfo.DEFAULT_REPLICA_ID);
// verify there is no other errors
HBaseFsck hbck = doFsck(conf, false);
@@ -1172,7 +1180,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
// Mess it up by deleting a region from the metadata
deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A"),
- Bytes.toBytes("B"), false, true, false, false, HRegionInfo.DEFAULT_REPLICA_ID);
+ Bytes.toBytes("B"), false, true, false, false, RegionInfo.DEFAULT_REPLICA_ID);
// verify there is no other errors
HBaseFsck hbck = doFsck(conf, false);
@@ -1234,10 +1242,10 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
// Mess it up by creating an overlap in the metadata
admin.disableTable(tableName);
deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("B"), true,
- true, false, true, HRegionInfo.DEFAULT_REPLICA_ID);
+ true, false, true, RegionInfo.DEFAULT_REPLICA_ID);
admin.enableTable(tableName);
- HRegionInfo hriOverlap =
+ RegionInfo hriOverlap =
createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A2"), Bytes.toBytes("B"));
TEST_UTIL.assignRegion(hriOverlap);
@@ -1365,7 +1373,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
assertEquals(ROWKEYS.length, countRows());
// Now let's mess it up, by adding a region with a duplicate startkey
- HRegionInfo hriDupe =
+ RegionInfo hriDupe =
createRegion(tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("B"));
TEST_UTIL.assignRegion(hriDupe);
@@ -1410,7 +1418,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
@Override
public boolean visit(Result rowResult) throws IOException {
- HRegionInfo hri = MetaTableAccessor.getHRegionInfo(rowResult);
+ RegionInfo hri = MetaTableAccessor.getRegionInfo(rowResult);
if (hri != null && !hri.getTable().isSystemTable()) {
Delete delete = new Delete(rowResult.getRow());
delete.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
@@ -1535,7 +1543,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
// Mess it up by leaving a hole in the meta data
admin.disableTable(tableName);
deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), true,
- true, false, true, HRegionInfo.DEFAULT_REPLICA_ID);
+ true, false, true, RegionInfo.DEFAULT_REPLICA_ID);
admin.enableTable(tableName);
HBaseFsck hbck = doFsck(conf, false);
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplicas.java
index 1971049..341bd34 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplicas.java
@@ -17,23 +17,42 @@
*/
package org.apache.hadoop.hbase.util;
+import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.assertErrors;
+import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.assertNoErrors;
+import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.doFsck;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.Set;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hbase.ClusterStatus.Option;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.HRegionLocation;
-
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.junit.AfterClass;
@@ -45,22 +64,6 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.Set;
-import java.util.concurrent.ScheduledThreadPoolExecutor;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.*;
-import static org.junit.Assert.*;
-
@Ignore
@Category({MiscTests.class, LargeTests.class})
public class TestHBaseFsckReplicas extends BaseTestHBaseFsck {
@@ -189,15 +192,15 @@ public class TestHBaseFsckReplicas extends BaseTestHBaseFsck {
// for the master to treat the request for assignment as valid; the master
// checks the region is valid either from its memory or meta)
Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService);
- List<HRegionInfo> regions = admin.getTableRegions(tableName);
+ List<RegionInfo> regions = admin.getRegions(tableName);
byte[] startKey = Bytes.toBytes("B");
byte[] endKey = Bytes.toBytes("C");
byte[] metaKey = null;
- HRegionInfo newHri = null;
- for (HRegionInfo h : regions) {
+ RegionInfo newHri = null;
+ for (RegionInfo h : regions) {
if (Bytes.compareTo(h.getStartKey(), startKey) == 0 &&
Bytes.compareTo(h.getEndKey(), endKey) == 0 &&
- h.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
+ h.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
metaKey = h.getRegionName();
//create a hri with replicaId as 2 (since we already have replicas with replicaid 0 and 1)
newHri = RegionReplicaUtil.getRegionInfoForReplica(h, 2);
@@ -241,15 +244,15 @@ public class TestHBaseFsckReplicas extends BaseTestHBaseFsck {
public void testNotInHdfsWithReplicas() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
try {
- HRegionInfo[] oldHris = new HRegionInfo[2];
+ RegionInfo[] oldHris = new RegionInfo[2];
setupTableWithRegionReplica(tableName, 2);
assertEquals(ROWKEYS.length, countRows());
- NavigableMap<HRegionInfo, ServerName> map =
+ NavigableMap<RegionInfo, ServerName> map =
MetaTableAccessor.allTableRegions(TEST_UTIL.getConnection(),
tbl.getName());
int i = 0;
// store the HRIs of the regions we will mess up
- for (Map.Entry<HRegionInfo, ServerName> m : map.entrySet()) {
+ for (Map.Entry<RegionInfo, ServerName> m : map.entrySet()) {
if (m.getKey().getStartKey().length > 0 &&
m.getKey().getStartKey()[0] == Bytes.toBytes("B")[0]) {
LOG.debug("Initially server hosting " + m.getKey() + " is " + m.getValue());
@@ -276,11 +279,11 @@ public class TestHBaseFsckReplicas extends BaseTestHBaseFsck {
// the following code checks whether the old primary/secondary has
// been unassigned and the new primary/secondary has been assigned
i = 0;
- HRegionInfo[] newHris = new HRegionInfo[2];
+ RegionInfo[] newHris = new RegionInfo[2];
// get all table's regions from meta
map = MetaTableAccessor.allTableRegions(TEST_UTIL.getConnection(), tbl.getName());
// get the HRIs of the new regions (hbck created new regions for fixing the hdfs mess-up)
- for (Map.Entry<HRegionInfo, ServerName> m : map.entrySet()) {
+ for (Map.Entry<RegionInfo, ServerName> m : map.entrySet()) {
if (m.getKey().getStartKey().length > 0 &&
m.getKey().getStartKey()[0] == Bytes.toBytes("B")[0]) {
newHris[i++] = m.getKey();
@@ -289,9 +292,9 @@ public class TestHBaseFsckReplicas extends BaseTestHBaseFsck {
// get all the online regions in the regionservers
Collection<ServerName> servers =
admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers();
- Set<HRegionInfo> onlineRegions = new HashSet<>();
+ Set<RegionInfo> onlineRegions = new HashSet<>();
for (ServerName s : servers) {
- List<HRegionInfo> list = admin.getOnlineRegions(s);
+ List<RegionInfo> list = admin.getRegions(s);
onlineRegions.addAll(list);
}
// the new HRIs must be a subset of the online regions
@@ -325,7 +328,7 @@ public class TestHBaseFsckReplicas extends BaseTestHBaseFsck {
admin.enableCatalogJanitor(false);
meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService);
HRegionLocation loc = this.connection.getRegionLocation(table, SPLITS[0], false);
- HRegionInfo hriParent = loc.getRegionInfo();
+ RegionInfo hriParent = loc.getRegionInfo();
// Split Region A just before B
this.connection.getAdmin().split(table, Bytes.toBytes("A@"));