You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by st...@apache.org on 2013/08/08 06:19:56 UTC
svn commit: r1511577 [20/23] - in /hbase/trunk:
hbase-client/src/main/java/org/apache/hadoop/hbase/
hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/
hbase-client/src/main/java/org/apache/hadoop/hbase/client/
hbase-client/src/main/java/org/ap...
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java Thu Aug 8 04:19:49 2013
@@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HRegionIn
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest;
@@ -91,10 +92,10 @@ public class TestRSStatusServlet {
@Test
public void testWithRegions() throws IOException, ServiceException {
- HTableDescriptor htd = new HTableDescriptor("mytable");
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("mytable"));
List<HRegionInfo> regions = Lists.newArrayList(
- new HRegionInfo(htd.getName(), Bytes.toBytes("a"), Bytes.toBytes("d")),
- new HRegionInfo(htd.getName(), Bytes.toBytes("d"), Bytes.toBytes("z"))
+ new HRegionInfo(htd.getTableName(), Bytes.toBytes("a"), Bytes.toBytes("d")),
+ new HRegionInfo(htd.getTableName(), Bytes.toBytes("d"), Bytes.toBytes("z"))
);
Mockito.doReturn(ResponseConverter.buildGetOnlineRegionResponse(
regions)).when(rs).getOnlineRegion((RpcController)Mockito.any(),
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java Thu Aug 8 04:19:49 2013
@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.BlockLocatio
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.client.HTable;
@@ -52,7 +53,8 @@ public class TestRegionFavoredNodes {
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static HTable table;
- private static final byte[] TABLE_NAME = Bytes.toBytes("table");
+ private static final TableName TABLE_NAME =
+ TableName.valueOf("table");
private static final byte[] COLUMN_FAMILY = Bytes.toBytes("family");
private static final int FAVORED_NODES_NUM = 3;
private static final int REGION_SERVERS = 6;
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java Thu Aug 8 04:19:49 2013
@@ -29,6 +29,7 @@ import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
@@ -346,7 +347,8 @@ public class TestRegionMergeTransaction
@Test
public void testMeregedRegionBoundary() {
- byte[] tableName = Bytes.toBytes("testMeregedRegionBoundary");
+ TableName tableName =
+ TableName.valueOf("testMeregedRegionBoundary");
byte[] a = Bytes.toBytes("a");
byte[] b = Bytes.toBytes("b");
byte[] z = Bytes.toBytes("z");
@@ -354,19 +356,19 @@ public class TestRegionMergeTransaction
HRegionInfo r2 = new HRegionInfo(tableName, a, z);
HRegionInfo m = RegionMergeTransaction.getMergedRegionInfo(r1, r2);
assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey())
- && Bytes.equals(m.getEndKey(), r1.getEndKey()));
+ && Bytes.equals(m.getEndKey(), r1.getEndKey()));
r1 = new HRegionInfo(tableName, null, a);
r2 = new HRegionInfo(tableName, a, z);
m = RegionMergeTransaction.getMergedRegionInfo(r1, r2);
assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey())
- && Bytes.equals(m.getEndKey(), r2.getEndKey()));
+ && Bytes.equals(m.getEndKey(), r2.getEndKey()));
r1 = new HRegionInfo(tableName, null, a);
r2 = new HRegionInfo(tableName, z, null);
m = RegionMergeTransaction.getMergedRegionInfo(r1, r2);
assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey())
- && Bytes.equals(m.getEndKey(), r2.getEndKey()));
+ && Bytes.equals(m.getEndKey(), r2.getEndKey()));
r1 = new HRegionInfo(tableName, a, z);
r2 = new HRegionInfo(tableName, z, null);
@@ -396,10 +398,10 @@ public class TestRegionMergeTransaction
final byte[] startrow, final byte[] endrow)
throws IOException {
// Make a region with start and end keys.
- HTableDescriptor htd = new HTableDescriptor("table");
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("table"));
HColumnDescriptor hcd = new HColumnDescriptor(CF);
htd.addFamily(hcd);
- HRegionInfo hri = new HRegionInfo(htd.getName(), startrow, endrow);
+ HRegionInfo hri = new HRegionInfo(htd.getTableName(), startrow, endrow);
HRegion a = HRegion.createHRegion(hri, testdir,
TEST_UTIL.getConfiguration(), htd);
HRegion.closeHRegion(a);
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java Thu Aug 8 04:19:49 2013
@@ -31,6 +31,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
@@ -52,6 +53,7 @@ import org.apache.hadoop.hbase.master.HM
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;
@@ -107,7 +109,8 @@ public class TestRegionMergeTransactionO
@Test
public void testWholesomeMerge() throws Exception {
LOG.info("Starting testWholesomeMerge");
- final byte[] tableName = Bytes.toBytes("testWholesomeMerge");
+ final TableName tableName =
+ TableName.valueOf("testWholesomeMerge");
// Create table and load data.
HTable table = createTableAndLoadData(master, tableName);
@@ -149,7 +152,8 @@ public class TestRegionMergeTransactionO
LOG.info("Starting testCleanMergeReference");
admin.enableCatalogJanitor(false);
try {
- final byte[] tableName = Bytes.toBytes("testCleanMergeReference");
+ final TableName tableName =
+ TableName.valueOf("testCleanMergeReference");
// Create table and load data.
HTable table = createTableAndLoadData(master, tableName);
// Merge 1st and 2nd region
@@ -160,10 +164,10 @@ public class TestRegionMergeTransactionO
List<Pair<HRegionInfo, ServerName>> tableRegions = MetaReader
.getTableRegionsAndLocations(master.getCatalogTracker(),
- Bytes.toString(tableName));
+ tableName);
HRegionInfo mergedRegionInfo = tableRegions.get(0).getFirst();
HTableDescriptor tableDescritor = master.getTableDescriptors().get(
- Bytes.toString(tableName));
+ tableName);
Result mergedRegionResult = MetaReader.getRegionResult(
master.getCatalogTracker(), mergedRegionInfo.getRegionName());
@@ -181,7 +185,7 @@ public class TestRegionMergeTransactionO
FileSystem fs = master.getMasterFileSystem().getFileSystem();
Path rootDir = master.getMasterFileSystem().getRootDir();
- Path tabledir = new Path(rootDir, mergedRegionInfo.getTableNameAsString());
+ Path tabledir = FSUtils.getTableDir(rootDir, mergedRegionInfo.getTableName());
Path regionAdir = new Path(tabledir, regionA.getEncodedName());
Path regionBdir = new Path(tabledir, regionB.getEncodedName());
assertTrue(fs.exists(regionAdir));
@@ -228,7 +232,7 @@ public class TestRegionMergeTransactionO
@Test
public void testMerge() throws Exception {
LOG.info("Starting testMerge");
- final byte[] tableName = Bytes.toBytes("testMerge");
+ final TableName tableName = TableName.valueOf("testMerge");
try {
// Create table and load data.
@@ -272,7 +276,7 @@ public class TestRegionMergeTransactionO
}
private PairOfSameType<HRegionInfo> mergeRegionsAndVerifyRegionNum(
- HMaster master, byte[] tablename,
+ HMaster master, TableName tablename,
int regionAnum, int regionBnum, int expectedRegionNum) throws Exception {
PairOfSameType<HRegionInfo> mergedRegions =
requestMergeRegion(master, tablename, regionAnum, regionBnum);
@@ -281,11 +285,11 @@ public class TestRegionMergeTransactionO
}
private PairOfSameType<HRegionInfo> requestMergeRegion(
- HMaster master, byte[] tablename,
+ HMaster master, TableName tablename,
int regionAnum, int regionBnum) throws Exception {
List<Pair<HRegionInfo, ServerName>> tableRegions = MetaReader
.getTableRegionsAndLocations(master.getCatalogTracker(),
- Bytes.toString(tablename));
+ tablename);
HRegionInfo regionA = tableRegions.get(regionAnum).getFirst();
HRegionInfo regionB = tableRegions.get(regionBnum).getFirst();
TEST_UTIL.getHBaseAdmin().mergeRegions(
@@ -294,14 +298,14 @@ public class TestRegionMergeTransactionO
return new PairOfSameType<HRegionInfo>(regionA, regionB);
}
- private void waitAndVerifyRegionNum(HMaster master, byte[] tablename,
+ private void waitAndVerifyRegionNum(HMaster master, TableName tablename,
int expectedRegionNum) throws Exception {
List<Pair<HRegionInfo, ServerName>> tableRegionsInMeta;
List<HRegionInfo> tableRegionsInMaster;
long timeout = System.currentTimeMillis() + waitTime;
while (System.currentTimeMillis() < timeout) {
tableRegionsInMeta = MetaReader.getTableRegionsAndLocations(
- master.getCatalogTracker(), Bytes.toString(tablename));
+ master.getCatalogTracker(), tablename);
tableRegionsInMaster = master.getAssignmentManager().getRegionStates()
.getRegionsOfTable(tablename);
if (tableRegionsInMeta.size() == expectedRegionNum
@@ -312,17 +316,17 @@ public class TestRegionMergeTransactionO
}
tableRegionsInMeta = MetaReader.getTableRegionsAndLocations(
- master.getCatalogTracker(), Bytes.toString(tablename));
+ master.getCatalogTracker(), tablename);
LOG.info("Regions after merge:" + Joiner.on(',').join(tableRegionsInMeta));
assertEquals(expectedRegionNum, tableRegionsInMeta.size());
}
- private HTable createTableAndLoadData(HMaster master, byte[] tablename)
+ private HTable createTableAndLoadData(HMaster master, TableName tablename)
throws Exception {
return createTableAndLoadData(master, tablename, INITIAL_REGION_NUM);
}
- private HTable createTableAndLoadData(HMaster master, byte[] tablename,
+ private HTable createTableAndLoadData(HMaster master, TableName tablename,
int numRegions) throws Exception {
assertTrue("ROWSIZE must > numregions:" + numRegions, ROWSIZE > numRegions);
byte[][] splitRows = new byte[numRegions - 1][];
@@ -339,14 +343,14 @@ public class TestRegionMergeTransactionO
List<Pair<HRegionInfo, ServerName>> tableRegions;
while (System.currentTimeMillis() < timeout) {
tableRegions = MetaReader.getTableRegionsAndLocations(
- master.getCatalogTracker(), Bytes.toString(tablename));
+ master.getCatalogTracker(), tablename);
if (tableRegions.size() == numRegions)
break;
Thread.sleep(250);
}
tableRegions = MetaReader.getTableRegionsAndLocations(
- master.getCatalogTracker(), Bytes.toString(tablename));
+ master.getCatalogTracker(), tablename);
LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));
assertEquals(numRegions, tableRegions.size());
return table;
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java Thu Aug 8 04:19:49 2013
@@ -62,7 +62,7 @@ public class TestRegionServerNoMaster {
@BeforeClass
public static void before() throws Exception {
HTU.startMiniCluster(NB_SERVERS);
- final byte[] tableName = Bytes.toBytes(TestRegionServerNoMaster.class.getName());
+ final byte[] tableName = Bytes.toBytes(TestRegionServerNoMaster.class.getSimpleName());
// Create table then get the single region for our new table.
table = HTU.createTable(tableName, HConstants.CATALOG_FAMILY);
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java Thu Aug 8 04:19:49 2013
@@ -28,6 +28,7 @@ import java.util.List;
import java.util.TreeMap;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
@@ -46,7 +47,7 @@ public class TestRegionSplitPolicy {
private HTableDescriptor htd;
private HRegion mockRegion;
private TreeMap<byte[], HStore> stores;
- private static final byte [] TABLENAME = new byte [] {'t'};
+ private static final TableName TABLENAME = TableName.valueOf("t");
@Before
public void setupMocks() {
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java Thu Aug 8 04:19:49 2013
@@ -56,10 +56,10 @@ public class TestResettingCounters {
byte [][] rows = new byte [numRows][];
for (int i=0; i<numRows; i++) rows[i] = Bytes.toBytes("r" + i);
- HTableDescriptor htd = new HTableDescriptor(table);
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
for (byte [] family : families) htd.addFamily(new HColumnDescriptor(family));
- HRegionInfo hri = new HRegionInfo(htd.getName(), null, null, false);
+ HRegionInfo hri = new HRegionInfo(htd.getTableName(), null, null, false);
String testDir = htu.getDataTestDir() + "/TestResettingCounters/";
Path path = new Path(testDir);
if (fs.exists(path)) {
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java Thu Aug 8 04:19:49 2013
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HRegionIn
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
@@ -61,7 +62,7 @@ public class TestScanner extends HBaseTe
};
static final HTableDescriptor TESTTABLEDESC =
- new HTableDescriptor("testscanner");
+ new HTableDescriptor(TableName.valueOf("testscanner"));
static {
TESTTABLEDESC.addFamily(
new HColumnDescriptor(HConstants.CATALOG_FAMILY)
@@ -73,7 +74,7 @@ public class TestScanner extends HBaseTe
}
/** HRegionInfo for root region */
public static final HRegionInfo REGION_INFO =
- new HRegionInfo(TESTTABLEDESC.getName(), HConstants.EMPTY_BYTE_ARRAY,
+ new HRegionInfo(TESTTABLEDESC.getTableName(), HConstants.EMPTY_BYTE_ARRAY,
HConstants.EMPTY_BYTE_ARRAY);
private static final byte [] ROW_KEY = REGION_INFO.getRegionName();
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java Thu Aug 8 04:19:49 2013
@@ -139,7 +139,7 @@ public class TestSeekOptimizations {
@Test
public void testMultipleTimestampRanges() throws IOException {
- region = TEST_UTIL.createTestRegion(TestSeekOptimizations.class.getName(),
+ region = TEST_UTIL.createTestRegion("testMultipleTimestampRanges",
new HColumnDescriptor(FAMILY)
.setCompressionType(comprAlgo)
.setBloomFilterType(bloomType)
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java Thu Aug 8 04:19:49 2013
@@ -38,8 +38,8 @@ import org.apache.hadoop.hbase.HTableDes
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
@@ -330,10 +330,10 @@ public class TestSplitTransaction {
throws IOException {
// Make a region with start and end keys. Use 'aaa', to 'AAA'. The load
// region utility will add rows between 'aaa' and 'zzz'.
- HTableDescriptor htd = new HTableDescriptor("table");
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("table"));
HColumnDescriptor hcd = new HColumnDescriptor(CF);
htd.addFamily(hcd);
- HRegionInfo hri = new HRegionInfo(htd.getName(), STARTROW, ENDROW);
+ HRegionInfo hri = new HRegionInfo(htd.getTableName(), STARTROW, ENDROW);
HRegion r = HRegion.createHRegion(hri, testdir, TEST_UTIL.getConfiguration(), htd);
HRegion.closeHRegion(r);
return HRegion.openHRegion(testdir, hri, htd, wal,
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java Thu Aug 8 04:19:49 2013
@@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -162,16 +163,16 @@ public class TestSplitTransactionOnClust
@Test(timeout = 60000)
public void testShouldFailSplitIfZNodeDoesNotExistDueToPrevRollBack() throws Exception {
- final byte[] tableName = Bytes
- .toBytes("testShouldFailSplitIfZNodeDoesNotExistDueToPrevRollBack");
+ final TableName tableName =
+ TableName.valueOf("testShouldFailSplitIfZNodeDoesNotExistDueToPrevRollBack");
try {
// Create table then get the single region for our new table.
- HTable t = createTableAndWait(tableName, Bytes.toBytes("cf"));
+ HTable t = createTableAndWait(tableName.getName(), Bytes.toBytes("cf"));
final List<HRegion> regions = cluster.getRegions(tableName);
HRegionInfo hri = getAndCheckSingleTableRegion(regions);
int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionName());
final HRegionServer regionServer = cluster.getRegionServer(regionServerIndex);
- insertData(tableName, admin, t);
+ insertData(tableName.getName(), admin, t);
t.close();
// Turn off balancer so it doesn't cut in and mess up our placements.
@@ -216,10 +217,11 @@ public class TestSplitTransactionOnClust
RegionStates regionStates = cluster.getMaster().getAssignmentManager().getRegionStates();
Map<String, RegionState> rit = regionStates.getRegionsInTransition();
- for (int i=0; rit.containsKey(hri.getTableNameAsString()) && i<100; i++) {
+ for (int i=0; rit.containsKey(hri.getTableName()) && i<100; i++) {
Thread.sleep(100);
}
- assertFalse("region still in transition", rit.containsKey(rit.containsKey(hri.getTableNameAsString())));
+ assertFalse("region still in transition", rit.containsKey(
+ rit.containsKey(hri.getTableName())));
List<HRegion> onlineRegions = regionServer.getOnlineRegions(tableName);
// Region server side split is successful.
@@ -460,7 +462,8 @@ public class TestSplitTransactionOnClust
public void testSplitShouldNotThrowNPEEvenARegionHasEmptySplitFiles() throws Exception {
Configuration conf = TESTING_UTIL.getConfiguration();
ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TESTING_UTIL);
- String userTableName = "testSplitShouldNotThrowNPEEvenARegionHasEmptySplitFiles";
+ TableName userTableName =
+ TableName.valueOf("testSplitShouldNotThrowNPEEvenARegionHasEmptySplitFiles");
HTableDescriptor htd = new HTableDescriptor(userTableName);
HColumnDescriptor hcd = new HColumnDescriptor("col");
htd.addFamily(hcd);
@@ -473,16 +476,16 @@ public class TestSplitTransactionOnClust
String val = "Val" + i;
p.add("col".getBytes(), "ql".getBytes(), val.getBytes());
table.put(p);
- admin.flush(userTableName);
+ admin.flush(userTableName.getName());
Delete d = new Delete(row.getBytes());
// Do a normal delete
table.delete(d);
- admin.flush(userTableName);
+ admin.flush(userTableName.getName());
}
- admin.majorCompact(userTableName);
+ admin.majorCompact(userTableName.getName());
List<HRegionInfo> regionsOfTable = TESTING_UTIL.getMiniHBaseCluster()
.getMaster().getAssignmentManager().getRegionStates()
- .getRegionsOfTable(userTableName.getBytes());
+ .getRegionsOfTable(userTableName);
HRegionInfo hRegionInfo = regionsOfTable.get(0);
Put p = new Put("row6".getBytes());
p.add("col".getBytes(), "ql".getBytes(), "val".getBytes());
@@ -493,17 +496,17 @@ public class TestSplitTransactionOnClust
p = new Put("row8".getBytes());
p.add("col".getBytes(), "ql".getBytes(), "val".getBytes());
table.put(p);
- admin.flush(userTableName);
+ admin.flush(userTableName.getName());
admin.split(hRegionInfo.getRegionName(), "row7".getBytes());
regionsOfTable = TESTING_UTIL.getMiniHBaseCluster().getMaster()
.getAssignmentManager().getRegionStates()
- .getRegionsOfTable(userTableName.getBytes());
+ .getRegionsOfTable(userTableName);
while (regionsOfTable.size() != 2) {
Thread.sleep(2000);
regionsOfTable = TESTING_UTIL.getMiniHBaseCluster().getMaster()
.getAssignmentManager().getRegionStates()
- .getRegionsOfTable(userTableName.getBytes());
+ .getRegionsOfTable(userTableName);
}
Assert.assertEquals(2, regionsOfTable.size());
Scan s = new Scan();
@@ -701,22 +704,22 @@ public class TestSplitTransactionOnClust
@Test(timeout = 60000)
public void testTableExistsIfTheSpecifiedTableRegionIsSplitParent() throws Exception {
ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TESTING_UTIL);
- final byte[] tableName =
- Bytes.toBytes("testTableExistsIfTheSpecifiedTableRegionIsSplitParent");
+ final TableName tableName =
+ TableName.valueOf("testTableExistsIfTheSpecifiedTableRegionIsSplitParent");
// Create table then get the single region for our new table.
- HTable t = createTableAndWait(tableName, Bytes.toBytes("cf"));
+ HTable t = createTableAndWait(tableName.getName(), Bytes.toBytes("cf"));
List<HRegion> regions = null;
try {
regions = cluster.getRegions(tableName);
int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionName());
HRegionServer regionServer = cluster.getRegionServer(regionServerIndex);
- insertData(tableName, admin, t);
+ insertData(tableName.getName(), admin, t);
// Turn off balancer so it doesn't cut in and mess up our placements.
admin.setBalancerRunning(false, true);
// Turn off the meta scanner so it don't remove parent on us.
cluster.getMaster().setCatalogJanitorEnabled(false);
boolean tableExists = MetaReader.tableExists(regionServer.getCatalogTracker(),
- Bytes.toString(tableName));
+ tableName);
assertEquals("The specified table should present.", true, tableExists);
final HRegion region = findSplittableRegion(regions);
assertTrue("not able to find a splittable region", region != null);
@@ -728,7 +731,7 @@ public class TestSplitTransactionOnClust
}
tableExists = MetaReader.tableExists(regionServer.getCatalogTracker(),
- Bytes.toString(tableName));
+ tableName);
assertEquals("The specified table should present.", true, tableExists);
} finally {
if (regions != null) {
@@ -766,9 +769,10 @@ public class TestSplitTransactionOnClust
@Test
public void testSplitRegionWithNoStoreFiles()
throws Exception {
- final byte[] tableName = Bytes.toBytes("testSplitRegionWithNoStoreFiles");
+ final TableName tableName =
+ TableName.valueOf("testSplitRegionWithNoStoreFiles");
// Create table then get the single region for our new table.
- createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
+ createTableAndWait(tableName.getName(), HConstants.CATALOG_FAMILY);
List<HRegion> regions = cluster.getRegions(tableName);
HRegionInfo hri = getAndCheckSingleTableRegion(regions);
ensureTableRegionNotOnSameServerAsMeta(admin, hri);
@@ -900,7 +904,7 @@ public class TestSplitTransactionOnClust
@Override
void transitionZKNode(Server server, RegionServerServices services, HRegion a, HRegion b)
throws IOException {
- if (this.currentRegion.getRegionInfo().getTableNameAsString()
+ if (this.currentRegion.getRegionInfo().getTableName().getNameAsString()
.equals("testShouldFailSplitIfZNodeDoesNotExistDueToPrevRollBack")) {
try {
if (!secondSplit){
@@ -912,14 +916,14 @@ public class TestSplitTransactionOnClust
}
super.transitionZKNode(server, services, a, b);
- if (this.currentRegion.getRegionInfo().getTableNameAsString()
+ if (this.currentRegion.getRegionInfo().getTableName().getNameAsString()
.equals("testShouldFailSplitIfZNodeDoesNotExistDueToPrevRollBack")) {
firstSplitCompleted = true;
}
}
@Override
public boolean rollback(Server server, RegionServerServices services) throws IOException {
- if (this.currentRegion.getRegionInfo().getTableNameAsString()
+ if (this.currentRegion.getRegionInfo().getTableName().getNameAsString()
.equals("testShouldFailSplitIfZNodeDoesNotExistDueToPrevRollBack")) {
if(secondSplit){
super.rollback(server, services);
@@ -983,7 +987,7 @@ public class TestSplitTransactionOnClust
}
private void removeDaughterFromMeta(final byte [] regionName) throws IOException {
- HTable metaTable = new HTable(TESTING_UTIL.getConfiguration(), HConstants.META_TABLE_NAME);
+ HTable metaTable = new HTable(TESTING_UTIL.getConfiguration(), TableName.META_TABLE_NAME);
try {
Delete d = new Delete(regionName);
LOG.info("Deleted " + Bytes.toString(regionName));
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java Thu Aug 8 04:19:49 2013
@@ -38,7 +38,6 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
@@ -51,13 +50,13 @@ import org.apache.hadoop.hbase.HTableDes
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
@@ -67,14 +66,13 @@ import org.apache.hadoop.hbase.security.
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
+import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
import org.apache.hadoop.util.Progressable;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
-import com.google.common.base.Joiner;
-
/**
* Test class for the Store
*/
@@ -141,7 +139,7 @@ public class TestStore extends TestCase
private void init(String methodName, Configuration conf,
HColumnDescriptor hcd) throws IOException {
- HTableDescriptor htd = new HTableDescriptor(table);
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
init(methodName, conf, htd, hcd);
}
@@ -149,6 +147,7 @@ public class TestStore extends TestCase
HColumnDescriptor hcd) throws IOException {
//Setting up a Store
Path basedir = new Path(DIR+methodName);
+ Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
String logName = "logs";
Path logdir = new Path(basedir, logName);
@@ -157,9 +156,9 @@ public class TestStore extends TestCase
fs.delete(logdir, true);
htd.addFamily(hcd);
- HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
+ HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
HLog hlog = HLogFactory.createHLog(fs, basedir, logName, conf);
- HRegion region = new HRegion(basedir, hlog, fs, conf, info, htd, null);
+ HRegion region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);
store = new HStore(region, hcd, conf);
}
@@ -841,7 +840,7 @@ public class TestStore extends TestCase
// HTD overrides XML.
--anyValue;
- HTableDescriptor htd = new HTableDescriptor(table);
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
HColumnDescriptor hcd = new HColumnDescriptor(family);
htd.setConfiguration(CONFIG_KEY, Long.toString(anyValue));
init(getName() + "-htd", conf, htd, hcd);
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java Thu Aug 8 04:19:49 2013
@@ -33,6 +33,7 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
@@ -89,9 +90,10 @@ public class TestStoreFile extends HBase
* @throws Exception
*/
public void testBasicHalfMapFile() throws Exception {
- final HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testBasicHalfMapFileTb"));
+ final HRegionInfo hri =
+ new HRegionInfo(TableName.valueOf("testBasicHalfMapFileTb"));
HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
- conf, fs, new Path(this.testDir, hri.getTableNameAsString()), hri);
+ conf, fs, new Path(this.testDir, hri.getTableName().getNameAsString()), hri);
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs, 2 * 1024)
.withFilePath(regionFs.createTempName())
@@ -138,9 +140,9 @@ public class TestStoreFile extends HBase
* @throws IOException
*/
public void testReference() throws IOException {
- final HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testReferenceTb"));
+ final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testReferenceTb"));
HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
- conf, fs, new Path(this.testDir, hri.getTableNameAsString()), hri);
+ conf, fs, new Path(this.testDir, hri.getTableName().getNameAsString()), hri);
// Make a store file and write data to it.
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs, 8 * 1024)
@@ -179,12 +181,12 @@ public class TestStoreFile extends HBase
}
public void testHFileLink() throws IOException {
- final HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testHFileLinkTb"));
+ final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testHFileLinkTb"));
// force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
Configuration testConf = new Configuration(this.conf);
FSUtils.setRootDir(testConf, this.testDir);
HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
- testConf, fs, new Path(this.testDir, hri.getTableNameAsString()), hri);
+ testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTableName()), hri);
// Make a store file and write data to it.
StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs, 8 * 1024)
@@ -224,9 +226,9 @@ public class TestStoreFile extends HBase
FSUtils.setRootDir(testConf, this.testDir);
// adding legal table name chars to verify regex handles it.
- HRegionInfo hri = new HRegionInfo(Bytes.toBytes("_original-evil-name"));
+ HRegionInfo hri = new HRegionInfo(TableName.valueOf("_original-evil-name"));
HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
- testConf, fs, new Path(this.testDir, hri.getTableNameAsString()), hri);
+ testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTableName()), hri);
// Make a store file and write data to it. <root>/<tablename>/<rgn>/<cf>/<file>
StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs, 8 * 1024)
@@ -236,9 +238,10 @@ public class TestStoreFile extends HBase
Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
// create link to store file. <root>/clone/region/<cf>/<hfile>-<region>-<table>
- HRegionInfo hriClone = new HRegionInfo(Bytes.toBytes("clone"));
+ HRegionInfo hriClone = new HRegionInfo(TableName.valueOf("clone"));
HRegionFileSystem cloneRegionFs = HRegionFileSystem.createRegionOnFileSystem(
- testConf, fs, new Path(this.testDir, hri.getTableNameAsString()), hriClone);
+ testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTableName()),
+ hriClone);
Path dstPath = cloneRegionFs.getStoreDir(TEST_FAMILY);
HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
Path linkFilePath = new Path(dstPath,
@@ -297,10 +300,12 @@ public class TestStoreFile extends HBase
KeyValue midKV = KeyValue.createKeyValueFromKey(midkey);
byte [] midRow = midKV.getRow();
// Create top split.
- HRegionInfo topHri = new HRegionInfo(regionFs.getRegionInfo().getTableName(), null, midRow);
+ HRegionInfo topHri = new HRegionInfo(regionFs.getRegionInfo().getTableName(),
+ null, midRow);
Path topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, midRow, true);
// Create bottom split.
- HRegionInfo bottomHri = new HRegionInfo(regionFs.getRegionInfo().getTableName(), midRow, null);
+ HRegionInfo bottomHri = new HRegionInfo(regionFs.getRegionInfo().getTableName(),
+ midRow, null);
Path bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, midRow, false);
// Make readers on top and bottom.
StoreFile.Reader top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE,
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java Thu Aug 8 04:19:49 2013
@@ -46,7 +46,7 @@ public class TestWideScanner extends HBa
static byte[][] COLUMNS = { A, B, C };
static final Random rng = new Random();
static final HTableDescriptor TESTTABLEDESC =
- new HTableDescriptor("testwidescan");
+ new HTableDescriptor(TableName.valueOf("testwidescan"));
static {
for (byte[] cfName : new byte[][] { A, B, C }) {
TESTTABLEDESC.addFamily(new HColumnDescriptor(cfName)
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java Thu Aug 8 04:19:49 2013
@@ -25,6 +25,7 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
@@ -57,7 +58,7 @@ public class TestCloseRegionHandler {
static final Log LOG = LogFactory.getLog(TestCloseRegionHandler.class);
private final static HBaseTestingUtility HTU = new HBaseTestingUtility();
private static final HTableDescriptor TEST_HTD =
- new HTableDescriptor("TestCloseRegionHandler");
+ new HTableDescriptor(TableName.valueOf("TestCloseRegionHandler"));
private HRegionInfo TEST_HRI;
private int testIndex = 0;
@@ -76,7 +77,7 @@ public class TestCloseRegionHandler {
*/
@Before
public void setupHRI() {
- TEST_HRI = new HRegionInfo(TEST_HTD.getName(),
+ TEST_HRI = new HRegionInfo(TEST_HTD.getTableName(),
Bytes.toBytes(testIndex),
Bytes.toBytes(testIndex + 1));
testIndex++;
@@ -95,7 +96,7 @@ public class TestCloseRegionHandler {
final RegionServerServices rss = new MockRegionServerServices();
HTableDescriptor htd = TEST_HTD;
final HRegionInfo hri =
- new HRegionInfo(htd.getName(), HConstants.EMPTY_END_ROW,
+ new HRegionInfo(htd.getTableName(), HConstants.EMPTY_END_ROW,
HConstants.EMPTY_END_ROW);
HRegion region =
HRegion.createHRegion(hri, HTU.getDataTestDir(),
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java Thu Aug 8 04:19:49 2013
@@ -56,7 +56,7 @@ public class TestOpenRegionHandler {
@BeforeClass public static void before() throws Exception {
HTU.startMiniZKCluster();
- TEST_HTD = new HTableDescriptor("TestOpenRegionHandler.java");
+ TEST_HTD = new HTableDescriptor(TableName.valueOf("TestOpenRegionHandler.java"));
}
@AfterClass public static void after() throws IOException {
@@ -71,7 +71,7 @@ public class TestOpenRegionHandler {
*/
@Before
public void setupHRI() {
- TEST_HRI = new HRegionInfo(TEST_HTD.getName(),
+ TEST_HRI = new HRegionInfo(TEST_HTD.getTableName(),
Bytes.toBytes(testIndex),
Bytes.toBytes(testIndex + 1));
testIndex++;
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java Thu Aug 8 04:19:49 2013
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.HRegionIn
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry;
@@ -224,7 +225,7 @@ public final class HLogPerformanceEvalua
}
private static HTableDescriptor createHTableDescriptor(final int numFamilies) {
- HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE_NAME));
for (int i = 0; i < numFamilies; ++i) {
HColumnDescriptor colDef = new HColumnDescriptor(FAMILY_PREFIX + i);
htd.addFamily(colDef);
@@ -296,7 +297,7 @@ public final class HLogPerformanceEvalua
private HRegion openRegion(final FileSystem fs, final Path dir, final HTableDescriptor htd, final HLog hlog)
throws IOException {
// Initialize HRegion
- HRegionInfo regionInfo = new HRegionInfo(htd.getName());
+ HRegionInfo regionInfo = new HRegionInfo(htd.getTableName());
return HRegion.createHRegion(regionInfo, dir, getConf(), htd, hlog);
}
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java Thu Aug 8 04:19:49 2013
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HColumnDe
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -150,11 +151,11 @@ public class TestDurability {
// lifted from TestAtomicOperation
private HRegion createHRegion (byte [] tableName, String callingMethod, HLog log, boolean isDeferredLogFlush)
throws IOException {
- HTableDescriptor htd = new HTableDescriptor(tableName);
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
htd.setDeferredLogFlush(isDeferredLogFlush);
HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
htd.addFamily(hcd);
- HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
+ HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
Path path = new Path(DIR + callingMethod);
if (FS.exists(path)) {
if (!FS.delete(path, true)) {
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java Thu Aug 8 04:19:49 2013
@@ -158,14 +158,15 @@ public class TestHLog {
@Test
public void testSplit() throws IOException {
- final byte [] tableName = Bytes.toBytes(getName());
- final byte [] rowName = tableName;
+ final TableName tableName =
+ TableName.valueOf(getName());
+ final byte [] rowName = tableName.getName();
Path logdir = new Path(hbaseDir, HConstants.HREGION_LOGDIR_NAME);
HLog log = HLogFactory.createHLog(fs, hbaseDir,
HConstants.HREGION_LOGDIR_NAME, conf);
final int howmany = 3;
HRegionInfo[] infos = new HRegionInfo[3];
- Path tabledir = new Path(hbaseDir, getName());
+ Path tabledir = FSUtils.getTableDir(hbaseDir, tableName);
fs.mkdirs(tabledir);
for(int i = 0; i < howmany; i++) {
infos[i] = new HRegionInfo(tableName,
@@ -213,11 +214,12 @@ public class TestHLog {
*/
@Test
public void Broken_testSync() throws Exception {
- byte [] bytes = Bytes.toBytes(getName());
+ TableName tableName =
+ TableName.valueOf(getName());
// First verify that using streams all works.
Path p = new Path(dir, getName() + ".fsdos");
FSDataOutputStream out = fs.create(p);
- out.write(bytes);
+ out.write(tableName.getName());
Method syncMethod = null;
try {
syncMethod = out.getClass().getMethod("hflush", new Class<?> []{});
@@ -234,7 +236,7 @@ public class TestHLog {
assertTrue(in.available() > 0);
byte [] buffer = new byte [1024];
int read = in.read(buffer);
- assertEquals(bytes.length, read);
+ assertEquals(tableName.getName().length, read);
out.close();
in.close();
@@ -244,15 +246,15 @@ public class TestHLog {
HLog.Reader reader = null;
try {
- HRegionInfo info = new HRegionInfo(bytes,
+ HRegionInfo info = new HRegionInfo(tableName,
null,null, false);
HTableDescriptor htd = new HTableDescriptor();
- htd.addFamily(new HColumnDescriptor(bytes));
+ htd.addFamily(new HColumnDescriptor(tableName.getName()));
for (int i = 0; i < total; i++) {
WALEdit kvs = new WALEdit();
- kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
- wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
+ kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
+ wal.append(info, tableName, kvs, System.currentTimeMillis(), htd);
}
// Now call sync and try reading. Opening a Reader before you sync just
// gives you EOFE.
@@ -269,8 +271,8 @@ public class TestHLog {
// that has had a sync done on it.
for (int i = 0; i < total; i++) {
WALEdit kvs = new WALEdit();
- kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
- wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
+ kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
+ wal.append(info, tableName, kvs, System.currentTimeMillis(), htd);
}
reader = HLogFactory.createReader(fs, walPath, conf);
count = 0;
@@ -288,8 +290,8 @@ public class TestHLog {
final byte [] value = new byte[1025 * 1024]; // Make a 1M value.
for (int i = 0; i < total; i++) {
WALEdit kvs = new WALEdit();
- kvs.add(new KeyValue(Bytes.toBytes(i), bytes, value));
- wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
+ kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), value));
+ wal.append(info, tableName, kvs, System.currentTimeMillis(), htd);
}
// Now I should have written out lots of blocks. Sync then read.
wal.sync();
@@ -381,7 +383,8 @@ public class TestHLog {
*/
@Test (timeout=300000)
public void testAppendClose() throws Exception {
- byte [] tableName = Bytes.toBytes(getName());
+ TableName tableName =
+ TableName.valueOf(getName());
HRegionInfo regioninfo = new HRegionInfo(tableName,
HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);
@@ -390,11 +393,11 @@ public class TestHLog {
final int total = 20;
HTableDescriptor htd = new HTableDescriptor();
- htd.addFamily(new HColumnDescriptor(tableName));
+ htd.addFamily(new HColumnDescriptor(tableName.getName()));
for (int i = 0; i < total; i++) {
WALEdit kvs = new WALEdit();
- kvs.add(new KeyValue(Bytes.toBytes(i), tableName, tableName));
+ kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
wal.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd);
}
// Now call sync to send the data to HDFS datanodes
@@ -502,7 +505,8 @@ public class TestHLog {
@Test
public void testEditAdd() throws IOException {
final int COL_COUNT = 10;
- final byte [] tableName = Bytes.toBytes("tablename");
+ final TableName tableName =
+ TableName.valueOf("tablename");
final byte [] row = Bytes.toBytes("row");
HLog.Reader reader = null;
HLog log = null;
@@ -539,7 +543,7 @@ public class TestHLog {
HLogKey key = entry.getKey();
WALEdit val = entry.getEdit();
assertTrue(Bytes.equals(info.getEncodedNameAsBytes(), key.getEncodedRegionName()));
- assertTrue(Bytes.equals(tableName, key.getTablename()));
+ assertTrue(tableName.equals(key.getTablename()));
KeyValue kv = val.getKeyValues().get(0);
assertTrue(Bytes.equals(row, kv.getRow()));
assertEquals((byte)(i + '0'), kv.getValue()[0]);
@@ -561,7 +565,8 @@ public class TestHLog {
@Test
public void testAppend() throws IOException {
final int COL_COUNT = 10;
- final byte [] tableName = Bytes.toBytes("tablename");
+ final TableName tableName =
+ TableName.valueOf("tablename");
final byte [] row = Bytes.toBytes("row");
Reader reader = null;
HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
@@ -593,7 +598,7 @@ public class TestHLog {
for (KeyValue val : entry.getEdit().getKeyValues()) {
assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
entry.getKey().getEncodedRegionName()));
- assertTrue(Bytes.equals(tableName, entry.getKey().getTablename()));
+ assertTrue(tableName.equals(entry.getKey().getTablename()));
assertTrue(Bytes.equals(row, val.getRow()));
assertEquals((byte)(idx + '0'), val.getValue()[0]);
System.out.println(entry.getKey() + " " + val);
@@ -616,7 +621,8 @@ public class TestHLog {
@Test
public void testVisitors() throws Exception {
final int COL_COUNT = 10;
- final byte [] tableName = Bytes.toBytes("tablename");
+ final TableName tableName =
+ TableName.valueOf("tablename");
final byte [] row = Bytes.toBytes("row");
HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf);
try {
@@ -651,8 +657,10 @@ public class TestHLog {
@Test
public void testLogCleaning() throws Exception {
LOG.info("testLogCleaning");
- final byte [] tableName = Bytes.toBytes("testLogCleaning");
- final byte [] tableName2 = Bytes.toBytes("testLogCleaning2");
+ final TableName tableName =
+ TableName.valueOf("testLogCleaning");
+ final TableName tableName2 =
+ TableName.valueOf("testLogCleaning2");
HLog log = HLogFactory.createHLog(fs, hbaseDir,
getName(), conf);
@@ -749,7 +757,7 @@ public class TestHLog {
}
}
- private void addEdits(HLog log, HRegionInfo hri, byte [] tableName,
+ private void addEdits(HLog log, HRegionInfo hri, TableName tableName,
int times) throws IOException {
HTableDescriptor htd = new HTableDescriptor();
htd.addFamily(new HColumnDescriptor("row"));
@@ -771,7 +779,8 @@ public class TestHLog {
public void testReadLegacyLog() throws IOException {
final int columnCount = 5;
final int recordCount = 5;
- final byte[] tableName = Bytes.toBytes("tablename");
+ final TableName tableName =
+ TableName.valueOf("tablename");
final byte[] row = Bytes.toBytes("row");
long timestamp = System.currentTimeMillis();
Path path = new Path(dir, "temphlog");
@@ -809,7 +818,7 @@ public class TestHLog {
assertNotNull(entry);
assertEquals(columnCount, entry.getEdit().size());
assertArrayEquals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName());
- assertArrayEquals(tableName, entry.getKey().getTablename());
+ assertEquals(tableName, entry.getKey().getTablename());
int idx = 0;
for (KeyValue val : entry.getEdit().getKeyValues()) {
assertTrue(Bytes.equals(row, val.getRow()));
@@ -854,7 +863,8 @@ public class TestHLog {
private void doRead(boolean withTrailer) throws IOException {
final int columnCount = 5;
final int recordCount = 5;
- final byte[] tableName = Bytes.toBytes("tablename");
+ final TableName tableName =
+ TableName.valueOf("tablename");
final byte[] row = Bytes.toBytes("row");
long timestamp = System.currentTimeMillis();
Path path = new Path(dir, "temphlog");
@@ -896,7 +906,7 @@ public class TestHLog {
assertNotNull(entry);
assertEquals(columnCount, entry.getEdit().size());
assertArrayEquals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName());
- assertArrayEquals(tableName, entry.getKey().getTablename());
+ assertEquals(tableName, entry.getKey().getTablename());
int idx = 0;
for (KeyValue val : entry.getEdit().getKeyValues()) {
assertTrue(Bytes.equals(row, val.getRow()));
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogFiltering.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogFiltering.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogFiltering.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogFiltering.java Thu Aug 8 04:19:49 2013
@@ -28,6 +28,7 @@ import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.client.Delete;
@@ -55,7 +56,8 @@ public class TestHLogFiltering {
private static final int NUM_MASTERS = 1;
private static final int NUM_RS = 4;
- private static final byte[] TABLE_NAME = Bytes.toBytes("TestHLogFiltering");
+ private static final TableName TABLE_NAME =
+ TableName.valueOf("TestHLogFiltering");
private static final byte[] CF1 = Bytes.toBytes("MyCF1");
private static final byte[] CF2 = Bytes.toBytes("MyCF2");
private static final byte[][] FAMILIES = { CF1, CF2 };
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogMethods.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogMethods.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogMethods.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogMethods.java Thu Aug 8 04:19:49 2013
@@ -42,7 +42,8 @@ import static org.mockito.Mockito.mock;
@Category(SmallTests.class)
public class TestHLogMethods {
private static final byte[] TEST_REGION = Bytes.toBytes("test_region");;
- private static final byte[] TEST_TABLE = Bytes.toBytes("test_table");
+ private static final TableName TEST_TABLE =
+ TableName.valueOf("test_table");
private final HBaseTestingUtility util = new HBaseTestingUtility();
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java Thu Aug 8 04:19:49 2013
@@ -42,6 +42,11 @@ import java.util.concurrent.atomic.Atomi
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.log4j.Level;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -68,12 +73,8 @@ import org.apache.hadoop.hbase.util.FSUt
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
-import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.ipc.RemoteException;
-import org.apache.log4j.Level;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
@@ -114,14 +115,15 @@ public class TestHLogSplit {
private static final int NUM_WRITERS = 10;
private static final int ENTRIES = 10; // entries per writer per region
- private static final byte[] TABLE_NAME = "t1".getBytes();
+ private static final TableName TABLE_NAME =
+ TableName.valueOf("t1");
private static final byte[] FAMILY = "f1".getBytes();
private static final byte[] QUALIFIER = "q1".getBytes();
private static final byte[] VALUE = "v1".getBytes();
private static final String HLOG_FILE_PREFIX = "hlog.dat.";
private static List<String> REGIONS = new ArrayList<String>();
private static final String HBASE_SKIP_ERRORS = "hbase.hlog.split.skip.errors";
- private static final Path TABLEDIR = new Path(HBASEDIR, Bytes.toString(TABLE_NAME));
+ private static final Path TABLEDIR = FSUtils.getTableDir(HBASEDIR, TABLE_NAME);
private static String ROBBER;
private static String ZOMBIE;
private static String [] GROUP = new String [] {"supergroup"};
@@ -336,14 +338,14 @@ public class TestHLogSplit {
public void testRecoveredEditsPathForMeta() throws IOException {
FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
byte [] encoded = HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
- Path tdir = new Path(HBASEDIR, Bytes.toString(HConstants.META_TABLE_NAME));
+ Path tdir = FSUtils.getTableDir(HBASEDIR, TableName.META_TABLE_NAME);
Path regiondir = new Path(tdir,
HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
fs.mkdirs(regiondir);
long now = System.currentTimeMillis();
HLog.Entry entry =
new HLog.Entry(new HLogKey(encoded,
- HConstants.META_TABLE_NAME, 1, now, HConstants.DEFAULT_CLUSTER_ID),
+ TableName.META_TABLE_NAME, 1, now, HConstants.DEFAULT_CLUSTER_ID),
new WALEdit());
Path p = HLogSplitter.getRegionSplitEditsPath(fs, entry, HBASEDIR, true);
String parentOfParent = p.getParent().getParent().getName();
@@ -358,14 +360,14 @@ public class TestHLogSplit {
public void testOldRecoveredEditsFileSidelined() throws IOException {
FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
byte [] encoded = HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
- Path tdir = new Path(HBASEDIR, Bytes.toString(HConstants.META_TABLE_NAME));
+ Path tdir = FSUtils.getTableDir(HBASEDIR, TableName.META_TABLE_NAME);
Path regiondir = new Path(tdir,
HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
fs.mkdirs(regiondir);
long now = System.currentTimeMillis();
HLog.Entry entry =
new HLog.Entry(new HLogKey(encoded,
- HConstants.META_TABLE_NAME, 1, now, HConstants.DEFAULT_CLUSTER_ID),
+ TableName.META_TABLE_NAME, 1, now, HConstants.DEFAULT_CLUSTER_ID),
new WALEdit());
Path parent = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
assertEquals(parent.getName(), HConstants.RECOVERED_EDITS_DIR);
@@ -767,9 +769,10 @@ public class TestHLogSplit {
HLogSplitter.split(HBASEDIR, HLOGDIR, OLDLOGDIR, fs, conf);
fs.rename(OLDLOGDIR, HLOGDIR);
- Path firstSplitPath = new Path(HBASEDIR, Bytes.toString(TABLE_NAME) + ".first");
- Path splitPath = new Path(HBASEDIR, Bytes.toString(TABLE_NAME));
- fs.rename(splitPath, firstSplitPath);
+ Path firstSplitPath = new Path(HBASEDIR, TABLE_NAME + ".first");
+ Path splitPath = new Path(HBASEDIR, TABLE_NAME.getNameAsString());
+ fs.rename(splitPath,
+ firstSplitPath);
fs.initialize(fs.getUri(), conf);
HLogSplitter.split(HBASEDIR, HLOGDIR, OLDLOGDIR, fs, conf);
@@ -1081,7 +1084,8 @@ public class TestHLogSplit {
try {
// put some entries in an HLog
- byte [] tableName = Bytes.toBytes(this.getClass().getName());
+ TableName tableName =
+ TableName.valueOf(this.getClass().getName());
HRegionInfo regioninfo = new HRegionInfo(tableName,
HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
log = HLogFactory.createHLog(fs, HBASEDIR, logName, conf);
@@ -1089,7 +1093,7 @@ public class TestHLogSplit {
final int total = 20;
for (int i = 0; i < total; i++) {
WALEdit kvs = new WALEdit();
- kvs.add(new KeyValue(Bytes.toBytes(i), tableName, tableName));
+ kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.addFamily(new HColumnDescriptor("column"));
log.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd);
@@ -1151,7 +1155,7 @@ public class TestHLogSplit {
if (stop.get()) {
return;
}
- Path tableDir = new Path(HBASEDIR, new String(TABLE_NAME));
+ Path tableDir = FSUtils.getTableDir(HBASEDIR, TABLE_NAME);
Path regionDir = new Path(tableDir, REGIONS.get(0));
Path recoveredEdits = new Path(regionDir, HConstants.RECOVERED_EDITS_DIR);
String region = "juliet";
@@ -1166,7 +1170,7 @@ public class TestHLogSplit {
fs.mkdirs(new Path(tableDir, region));
HLog.Writer writer = HLogFactory.createWriter(fs,
julietLog, conf);
- appendEntry(writer, "juliet".getBytes(), ("juliet").getBytes(),
+ appendEntry(writer, TableName.valueOf("juliet"), ("juliet").getBytes(),
("r").getBytes(), FAMILY, QUALIFIER, VALUE, 0);
writer.close();
LOG.info("Juliet file creator: created file " + julietLog);
@@ -1224,7 +1228,7 @@ public class TestHLogSplit {
fs.initialize(fs.getUri(), conf);
HLogSplitter.split(HBASEDIR, HLOGDIR, OLDLOGDIR, fs, conf);
- Path tdir = HTableDescriptor.getTableDir(HBASEDIR, TABLE_NAME);
+ Path tdir = FSUtils.getTableDir(HBASEDIR, TABLE_NAME);
assertFalse(fs.exists(tdir));
assertEquals(0, countHLog(fs.listStatus(OLDLOGDIR)[0].getPath(), fs, conf));
@@ -1362,9 +1366,9 @@ public class TestHLogSplit {
return ws;
}
- private Path[] getLogForRegion(Path rootdir, byte[] table, String region)
+ private Path[] getLogForRegion(Path rootdir, TableName table, String region)
throws IOException {
- Path tdir = HTableDescriptor.getTableDir(rootdir, table);
+ Path tdir = FSUtils.getTableDir(rootdir, table);
@SuppressWarnings("deprecation")
Path editsdir = HLogUtil.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir,
Bytes.toString(region.getBytes())));
@@ -1475,7 +1479,7 @@ public class TestHLogSplit {
}
- public static long appendEntry(HLog.Writer writer, byte[] table, byte[] region,
+ public static long appendEntry(HLog.Writer writer, TableName table, byte[] region,
byte[] row, byte[] family, byte[] qualifier,
byte[] value, long seq)
throws IOException {
@@ -1487,7 +1491,7 @@ public class TestHLogSplit {
}
private static HLog.Entry createTestEntry(
- byte[] table, byte[] region,
+ TableName table, byte[] region,
byte[] row, byte[] family, byte[] qualifier,
byte[] value, long seq) {
long time = System.nanoTime();
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java Thu Aug 8 04:19:49 2013
@@ -119,7 +119,7 @@ public class TestLogRollAbort {
// When the META table can be opened, the region servers are running
new HTable(TEST_UTIL.getConfiguration(),
- HConstants.META_TABLE_NAME).close();
+ TableName.META_TABLE_NAME).close();
// Create the test table and open it
String tableName = this.getClass().getSimpleName();
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java Thu Aug 8 04:19:49 2013
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.HTableDes
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
@@ -190,7 +191,7 @@ public class TestLogRolling {
private void startAndWriteData() throws IOException, InterruptedException {
// When the META table can be opened, the region servers are running
- new HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME);
+ new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME);
this.server = cluster.getRegionServerThreads().get(0).getRegionServer();
this.log = server.getWAL();
@@ -330,7 +331,7 @@ public class TestLogRolling {
// Create the test table and open it
String tableName = getName();
- HTableDescriptor desc = new HTableDescriptor(tableName);
+ HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
admin.createTable(desc);
@@ -426,14 +427,14 @@ public class TestLogRolling {
fs.getDefaultReplication() > 1);
LOG.info("Replication=" + fs.getDefaultReplication());
// When the META table can be opened, the region servers are running
- new HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME);
+ new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME);
this.server = cluster.getRegionServer(0);
this.log = server.getWAL();
// Create the test table and open it
String tableName = getName();
- HTableDescriptor desc = new HTableDescriptor(tableName);
+ HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
admin.createTable(desc);
@@ -583,7 +584,7 @@ public class TestLogRolling {
@Test
public void testCompactionRecordDoesntBlockRolling() throws Exception {
// When the META table can be opened, the region servers are running
- new HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME);
+ new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME);
String tableName = getName();
HTable table = createTestTable(tableName);
@@ -593,9 +594,11 @@ public class TestLogRolling {
server = TEST_UTIL.getRSForFirstRegionInTable(Bytes.toBytes(tableName));
this.log = server.getWAL();
FSHLog fshLog = (FSHLog)log;
- HRegion region = server.getOnlineRegions(table2.getTableName()).get(0);
+ HRegion region = server.getOnlineRegions(table2.getName()).get(0);
Store s = region.getStore(HConstants.CATALOG_FAMILY);
+ //have to flush namespace to ensure it doesn't affect WAL tests
+ admin.flush(TableName.NAMESPACE_TABLE_NAME.getName());
// Put some stuff into table2, to make sure we have some files to compact.
for (int i = 1; i <= 2; ++i) {
@@ -641,7 +644,7 @@ public class TestLogRolling {
private HTable createTestTable(String tableName) throws IOException {
// Create the test table and open it
- HTableDescriptor desc = new HTableDescriptor(tableName);
+ HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
admin.createTable(desc);
return new HTable(TEST_UTIL.getConfiguration(), tableName);
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java Thu Aug 8 04:19:49 2013
@@ -122,7 +122,7 @@ public class TestLogRollingNoCluster {
edit.add(new KeyValue(bytes, bytes, bytes, now, EMPTY_1K_ARRAY));
this.wal.append(HRegionInfo.FIRST_META_REGIONINFO,
- HTableDescriptor.META_TABLEDESC.getName(),
+ HTableDescriptor.META_TABLEDESC.getTableName(),
edit, now, HTableDescriptor.META_TABLEDESC);
}
String msg = getName() + " finished";
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java?rev=1511577&r1=1511576&r2=1511577&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java Thu Aug 8 04:19:49 2013
@@ -89,7 +89,7 @@ public class TestWALActionsListener {
DummyWALActionsListener laterobserver = new DummyWALActionsListener();
HLog hlog = HLogFactory.createHLog(fs, TEST_UTIL.getDataTestDir(), logName,
conf, list, null);
- HRegionInfo hri = new HRegionInfo(SOME_BYTES,
+ HRegionInfo hri = new HRegionInfo(TableName.valueOf(SOME_BYTES),
SOME_BYTES, SOME_BYTES, false);
for (int i = 0; i < 20; i++) {
@@ -100,7 +100,7 @@ public class TestWALActionsListener {
HTableDescriptor htd = new HTableDescriptor();
htd.addFamily(new HColumnDescriptor(b));
- hlog.append(hri, b, edit, 0, htd);
+ hlog.append(hri, TableName.valueOf(b), edit, 0, htd);
if (i == 10) {
hlog.registerWALActionsListener(laterobserver);
}