Posted to commits@hbase.apache.org by zh...@apache.org on 2016/11/22 10:30:25 UTC
[2/4] hbase git commit: HBASE-17132 Cleanup deprecated code for WAL
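This is part 2 of 4 of the HBASE-17132 cleanup. Across the test files below, the recurring changes are: the deprecated HLogKey is replaced by WALKey at every WAL append site, the legacy ("Deprecated") coprocessor hook variants and their counters are removed, the SequenceFileLog-based test helpers are deleted in favor of a protobuf-based faulty reader, and util.getHBaseAdmin() gives way to util.getAdmin(). A minimal sketch of the recurring key change (illustrative only; wal, regionInfo, tableName and edit stand in for the test fixtures used in the hunks below):

    // Before: the deprecated key class
    // wal.append(regionInfo, new HLogKey(regionInfo.getEncodedNameAsBytes(), tableName,
    //     System.currentTimeMillis()), edit, true);

    // After: WALKey carries the same region/table/timestamp information
    long txid = wal.append(regionInfo, new WALKey(regionInfo.getEncodedNameAsBytes(), tableName,
        System.currentTimeMillis()), edit, true);
    wal.sync(txid); // make the edit durable before asserting on it
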
http://git-wip-us.apache.org/repos/asf/hbase/blob/47a4e343/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
index bed3240..20d48bc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
@@ -81,11 +81,10 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.AfterClass;
import org.junit.BeforeClass;
-import org.junit.Ignore;
import org.junit.Test;
import org.junit.experimental.categories.Category;
-@Category({CoprocessorTests.class, MediumTests.class})
+@Category({ CoprocessorTests.class, MediumTests.class })
public class TestRegionObserverInterface {
private static final Log LOG = LogFactory.getLog(TestRegionObserverInterface.class);
@@ -104,8 +103,7 @@ public class TestRegionObserverInterface {
Configuration conf = util.getConfiguration();
conf.setBoolean("hbase.master.distributed.log.replay", true);
conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
- "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver",
- "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver$Legacy");
+ SimpleRegionObserver.class.getName());
util.startMiniCluster();
cluster = util.getMiniHBaseCluster();
@@ -116,17 +114,18 @@ public class TestRegionObserverInterface {
util.shutdownMiniCluster();
}
- @Test (timeout=300000)
+ @Test(timeout = 300000)
public void testRegionObserver() throws IOException {
TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRegionObserver");
// recreate table every time in order to reset the status of the
// coprocessor.
- Table table = util.createTable(tableName, new byte[][] {A, B, C});
+ Table table = util.createTable(tableName, new byte[][] { A, B, C });
try {
- verifyMethodResult(SimpleRegionObserver.class, new String[] { "hadPreGet", "hadPostGet",
- "hadPrePut", "hadPostPut", "hadDelete", "hadPostStartRegionOperation",
- "hadPostCloseRegionOperation", "hadPostBatchMutateIndispensably" }, tableName,
- new Boolean[] { false, false, false, false, false, false, false, false });
+ verifyMethodResult(SimpleRegionObserver.class,
+ new String[] { "hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadDelete",
+ "hadPostStartRegionOperation", "hadPostCloseRegionOperation",
+ "hadPostBatchMutateIndispensably" },
+ tableName, new Boolean[] { false, false, false, false, false, false, false, false });
Put put = new Put(ROW);
put.addColumn(A, A, A);
@@ -134,15 +133,16 @@ public class TestRegionObserverInterface {
put.addColumn(C, C, C);
table.put(put);
- verifyMethodResult(SimpleRegionObserver.class, new String[] { "hadPreGet", "hadPostGet",
- "hadPrePut", "hadPostPut", "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete",
- "hadPostStartRegionOperation", "hadPostCloseRegionOperation",
- "hadPostBatchMutateIndispensably" }, TEST_TABLE, new Boolean[] { false, false, true,
- true, true, true, false, true, true, true });
+ verifyMethodResult(SimpleRegionObserver.class,
+ new String[] { "hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadPreBatchMutate",
+ "hadPostBatchMutate", "hadDelete", "hadPostStartRegionOperation",
+ "hadPostCloseRegionOperation", "hadPostBatchMutateIndispensably" },
+ TEST_TABLE,
+ new Boolean[] { false, false, true, true, true, true, false, true, true, true });
verifyMethodResult(SimpleRegionObserver.class,
- new String[] { "getCtPreOpen", "getCtPostOpen", "getCtPreClose", "getCtPostClose" },
- tableName, new Integer[] { 1, 1, 0, 0 });
+ new String[] { "getCtPreOpen", "getCtPostOpen", "getCtPreClose", "getCtPostClose" },
+ tableName, new Integer[] { 1, 1, 0, 0 });
Get get = new Get(ROW);
get.addColumn(A, A);
@@ -151,9 +151,9 @@ public class TestRegionObserverInterface {
table.get(get);
verifyMethodResult(SimpleRegionObserver.class,
- new String[] { "hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadDelete",
- "hadPrePreparedDeleteTS" }, tableName,
- new Boolean[] { true, true, true, true, false, false });
+ new String[] { "hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadDelete",
+ "hadPrePreparedDeleteTS" },
+ tableName, new Boolean[] { true, true, true, true, false, false });
Delete delete = new Delete(ROW);
delete.addColumn(A, A);
@@ -162,31 +162,26 @@ public class TestRegionObserverInterface {
table.delete(delete);
verifyMethodResult(SimpleRegionObserver.class,
- new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
- "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete", "hadPrePreparedDeleteTS"},
- tableName,
- new Boolean[] {true, true, true, true, true, true, true, true}
- );
+ new String[] { "hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadPreBatchMutate",
+ "hadPostBatchMutate", "hadDelete", "hadPrePreparedDeleteTS" },
+ tableName, new Boolean[] { true, true, true, true, true, true, true, true });
} finally {
util.deleteTable(tableName);
table.close();
}
verifyMethodResult(SimpleRegionObserver.class,
- new String[] {"getCtPreOpen", "getCtPostOpen", "getCtPreClose", "getCtPostClose"},
- tableName,
- new Integer[] {1, 1, 1, 1});
+ new String[] { "getCtPreOpen", "getCtPostOpen", "getCtPreClose", "getCtPostClose" },
+ tableName, new Integer[] { 1, 1, 1, 1 });
}
- @Test (timeout=300000)
+ @Test(timeout = 300000)
public void testRowMutation() throws IOException {
TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRowMutation");
Table table = util.createTable(tableName, new byte[][] { A, B, C });
try {
verifyMethodResult(SimpleRegionObserver.class,
- new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
- "hadDeleted"},
- tableName,
- new Boolean[] {false, false, false, false, false});
+ new String[] { "hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadDeleted" },
+ tableName, new Boolean[] { false, false, false, false, false });
Put put = new Put(ROW);
put.addColumn(A, A, A);
put.addColumn(B, B, B);
@@ -203,18 +198,15 @@ public class TestRegionObserverInterface {
table.mutateRow(arm);
verifyMethodResult(SimpleRegionObserver.class,
- new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
- "hadDeleted"},
- tableName,
- new Boolean[] {false, false, true, true, true}
- );
+ new String[] { "hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadDeleted" },
+ tableName, new Boolean[] { false, false, true, true, true });
} finally {
util.deleteTable(tableName);
table.close();
}
}
- @Test (timeout=300000)
+ @Test(timeout = 300000)
public void testIncrementHook() throws IOException {
TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testIncrementHook");
Table table = util.createTable(tableName, new byte[][] { A, B, C });
@@ -223,80 +215,68 @@ public class TestRegionObserverInterface {
inc.addColumn(A, A, 1);
verifyMethodResult(SimpleRegionObserver.class,
- new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
- tableName,
- new Boolean[] {false, false, false}
- );
+ new String[] { "hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock" },
+ tableName, new Boolean[] { false, false, false });
table.increment(inc);
verifyMethodResult(SimpleRegionObserver.class,
- new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
- tableName,
- new Boolean[] {true, true, true}
- );
+ new String[] { "hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock" },
+ tableName, new Boolean[] { true, true, true });
} finally {
util.deleteTable(tableName);
table.close();
}
}
- @Test (timeout=300000)
+ @Test(timeout = 300000)
public void testCheckAndPutHooks() throws IOException {
- TableName tableName =
- TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndPutHooks");
- try (Table table = util.createTable(tableName, new byte[][] {A, B, C})) {
+ TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndPutHooks");
+ try (Table table = util.createTable(tableName, new byte[][] { A, B, C })) {
Put p = new Put(Bytes.toBytes(0));
p.addColumn(A, A, A);
table.put(p);
p = new Put(Bytes.toBytes(0));
p.addColumn(A, A, A);
verifyMethodResult(SimpleRegionObserver.class,
- new String[] { "hadPreCheckAndPut", "hadPreCheckAndPutAfterRowLock",
- "hadPostCheckAndPut" }, tableName, new Boolean[] { false, false, false });
+ new String[] { "hadPreCheckAndPut", "hadPreCheckAndPutAfterRowLock", "hadPostCheckAndPut" },
+ tableName, new Boolean[] { false, false, false });
table.checkAndPut(Bytes.toBytes(0), A, A, A, p);
verifyMethodResult(SimpleRegionObserver.class,
- new String[] {"hadPreCheckAndPut",
- "hadPreCheckAndPutAfterRowLock", "hadPostCheckAndPut"},
- tableName,
- new Boolean[] {true, true, true}
- );
+ new String[] { "hadPreCheckAndPut", "hadPreCheckAndPutAfterRowLock", "hadPostCheckAndPut" },
+ tableName, new Boolean[] { true, true, true });
} finally {
util.deleteTable(tableName);
}
}
- @Test (timeout=300000)
+ @Test(timeout = 300000)
public void testCheckAndDeleteHooks() throws IOException {
TableName tableName =
TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndDeleteHooks");
- Table table = util.createTable(tableName, new byte[][] {A, B, C});
+ Table table = util.createTable(tableName, new byte[][] { A, B, C });
try {
Put p = new Put(Bytes.toBytes(0));
p.addColumn(A, A, A);
table.put(p);
Delete d = new Delete(Bytes.toBytes(0));
table.delete(d);
- verifyMethodResult(SimpleRegionObserver.class,
- new String[] {"hadPreCheckAndDelete",
- "hadPreCheckAndDeleteAfterRowLock", "hadPostCheckAndDelete"},
- tableName,
- new Boolean[] {false, false, false}
- );
+ verifyMethodResult(
+ SimpleRegionObserver.class, new String[] { "hadPreCheckAndDelete",
+ "hadPreCheckAndDeleteAfterRowLock", "hadPostCheckAndDelete" },
+ tableName, new Boolean[] { false, false, false });
table.checkAndDelete(Bytes.toBytes(0), A, A, A, d);
- verifyMethodResult(SimpleRegionObserver.class,
- new String[] {"hadPreCheckAndDelete",
- "hadPreCheckAndDeleteAfterRowLock", "hadPostCheckAndDelete"},
- tableName,
- new Boolean[] {true, true, true}
- );
+ verifyMethodResult(
+ SimpleRegionObserver.class, new String[] { "hadPreCheckAndDelete",
+ "hadPreCheckAndDeleteAfterRowLock", "hadPostCheckAndDelete" },
+ tableName, new Boolean[] { true, true, true });
} finally {
util.deleteTable(tableName);
table.close();
}
}
- @Test (timeout=300000)
+ @Test(timeout = 300000)
public void testAppendHook() throws IOException {
TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testAppendHook");
Table table = util.createTable(tableName, new byte[][] { A, B, C });
@@ -305,35 +285,30 @@ public class TestRegionObserverInterface {
app.add(A, A, A);
verifyMethodResult(SimpleRegionObserver.class,
- new String[] {"hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock"},
- tableName,
- new Boolean[] {false, false, false}
- );
+ new String[] { "hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock" }, tableName,
+ new Boolean[] { false, false, false });
table.append(app);
verifyMethodResult(SimpleRegionObserver.class,
- new String[] {"hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock"},
- tableName,
- new Boolean[] {true, true, true}
- );
+ new String[] { "hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock" }, tableName,
+ new Boolean[] { true, true, true });
} finally {
util.deleteTable(tableName);
table.close();
}
}
- @Test (timeout=300000)
+ @Test(timeout = 300000)
// HBase-3583
public void testHBase3583() throws IOException {
- TableName tableName =
- TableName.valueOf("testHBase3583");
- util.createTable(tableName, new byte[][] {A, B, C});
+ TableName tableName = TableName.valueOf("testHBase3583");
+ util.createTable(tableName, new byte[][] { A, B, C });
util.waitUntilAllRegionsAssigned(tableName);
verifyMethodResult(SimpleRegionObserver.class,
- new String[] { "hadPreGet", "hadPostGet", "wasScannerNextCalled", "wasScannerCloseCalled" },
- tableName, new Boolean[] { false, false, false, false });
+ new String[] { "hadPreGet", "hadPostGet", "wasScannerNextCalled", "wasScannerCloseCalled" },
+ tableName, new Boolean[] { false, false, false, false });
Table table = util.getConnection().getTable(tableName);
Put put = new Put(ROW);
@@ -347,11 +322,8 @@ public class TestRegionObserverInterface {
// verify that scannerNext and scannerClose upcalls won't be invoked
// when we perform get().
verifyMethodResult(SimpleRegionObserver.class,
- new String[] {"hadPreGet", "hadPostGet", "wasScannerNextCalled",
- "wasScannerCloseCalled"},
- tableName,
- new Boolean[] {true, true, false, false}
- );
+ new String[] { "hadPreGet", "hadPostGet", "wasScannerNextCalled", "wasScannerCloseCalled" },
+ tableName, new Boolean[] { true, true, false, false });
Scan s = new Scan();
ResultScanner scanner = table.getScanner(s);
@@ -364,15 +336,13 @@ public class TestRegionObserverInterface {
// now scanner hooks should be invoked.
verifyMethodResult(SimpleRegionObserver.class,
- new String[] {"wasScannerNextCalled", "wasScannerCloseCalled"},
- tableName,
- new Boolean[] {true, true}
- );
+ new String[] { "wasScannerNextCalled", "wasScannerCloseCalled" }, tableName,
+ new Boolean[] { true, true });
util.deleteTable(tableName);
table.close();
}
- @Test (timeout=300000)
+ @Test(timeout = 300000)
public void testHBASE14489() throws IOException {
TableName tableName = TableName.valueOf("testHBASE14489");
Table table = util.createTable(tableName, new byte[][] { A });
@@ -396,18 +366,15 @@ public class TestRegionObserverInterface {
}
- @Test (timeout=300000)
+ @Test(timeout = 300000)
// HBase-3758
public void testHBase3758() throws IOException {
- TableName tableName =
- TableName.valueOf("testHBase3758");
- util.createTable(tableName, new byte[][] {A, B, C});
+ TableName tableName = TableName.valueOf("testHBase3758");
+ util.createTable(tableName, new byte[][] { A, B, C });
verifyMethodResult(SimpleRegionObserver.class,
- new String[] {"hadDeleted", "wasScannerOpenCalled"},
- tableName,
- new Boolean[] {false, false}
- );
+ new String[] { "hadDeleted", "wasScannerOpenCalled" }, tableName,
+ new Boolean[] { false, false });
Table table = util.getConnection().getTable(tableName);
Put put = new Put(ROW);
@@ -418,10 +385,8 @@ public class TestRegionObserverInterface {
table.delete(delete);
verifyMethodResult(SimpleRegionObserver.class,
- new String[] {"hadDeleted", "wasScannerOpenCalled"},
- tableName,
- new Boolean[] {true, false}
- );
+ new String[] { "hadDeleted", "wasScannerOpenCalled" }, tableName,
+ new Boolean[] { true, false });
Scan s = new Scan();
ResultScanner scanner = table.getScanner(s);
@@ -433,11 +398,8 @@ public class TestRegionObserverInterface {
}
// now scanner hooks should be invoked.
- verifyMethodResult(SimpleRegionObserver.class,
- new String[] {"wasScannerOpenCalled"},
- tableName,
- new Boolean[] {true}
- );
+ verifyMethodResult(SimpleRegionObserver.class, new String[] { "wasScannerOpenCalled" },
+ tableName, new Boolean[] { true });
util.deleteTable(tableName);
table.close();
}
@@ -448,8 +410,8 @@ public class TestRegionObserverInterface {
long lastFlush;
@Override
- public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
- Store store, final InternalScanner scanner, final ScanType scanType) {
+ public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
+ final InternalScanner scanner, final ScanType scanType) {
return new InternalScanner() {
@Override
public boolean next(List<Cell> results) throws IOException {
@@ -457,8 +419,7 @@ public class TestRegionObserverInterface {
}
@Override
- public boolean next(List<Cell> results, ScannerContext scannerContext)
- throws IOException {
+ public boolean next(List<Cell> results, ScannerContext scannerContext) throws IOException {
List<Cell> internalResults = new ArrayList<Cell>();
boolean hasMore;
do {
@@ -488,8 +449,8 @@ public class TestRegionObserverInterface {
}
@Override
- public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e,
- Store store, StoreFile resultFile) {
+ public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
+ StoreFile resultFile) {
lastCompaction = EnvironmentEdgeManager.currentTime();
}
@@ -498,14 +459,15 @@ public class TestRegionObserverInterface {
lastFlush = EnvironmentEdgeManager.currentTime();
}
}
+
/**
* Tests overriding compaction handling via coprocessor hooks
* @throws Exception
*/
- @Test (timeout=300000)
+ @Test(timeout = 300000)
public void testCompactionOverride() throws Exception {
TableName compactTable = TableName.valueOf("TestCompactionOverride");
- Admin admin = util.getHBaseAdmin();
+ Admin admin = util.getAdmin();
if (admin.tableExists(compactTable)) {
admin.disableTable(compactTable);
admin.deleteTable(compactTable);
@@ -517,7 +479,7 @@ public class TestRegionObserverInterface {
admin.createTable(htd);
Table table = util.getConnection().getTable(compactTable);
- for (long i=1; i<=10; i++) {
+ for (long i = 1; i <= 10; i++) {
byte[] iBytes = Bytes.toBytes(i);
Put put = new Put(iBytes);
put.setDurability(Durability.SKIP_WAL);
@@ -526,16 +488,16 @@ public class TestRegionObserverInterface {
}
HRegion firstRegion = cluster.getRegions(compactTable).get(0);
- Coprocessor cp = firstRegion.getCoprocessorHost().findCoprocessor(
- EvenOnlyCompactor.class.getName());
+ Coprocessor cp =
+ firstRegion.getCoprocessorHost().findCoprocessor(EvenOnlyCompactor.class.getName());
assertNotNull("EvenOnlyCompactor coprocessor should be loaded", cp);
- EvenOnlyCompactor compactor = (EvenOnlyCompactor)cp;
+ EvenOnlyCompactor compactor = (EvenOnlyCompactor) cp;
// force a compaction
long ts = System.currentTimeMillis();
admin.flush(compactTable);
// wait for flush
- for (int i=0; i<10; i++) {
+ for (int i = 0; i < 10; i++) {
if (compactor.lastFlush >= ts) {
break;
}
@@ -547,25 +509,25 @@ public class TestRegionObserverInterface {
ts = compactor.lastFlush;
admin.majorCompact(compactTable);
// wait for compaction
- for (int i=0; i<30; i++) {
+ for (int i = 0; i < 30; i++) {
if (compactor.lastCompaction >= ts) {
break;
}
Thread.sleep(1000);
}
- LOG.debug("Last compaction was at "+compactor.lastCompaction);
+ LOG.debug("Last compaction was at " + compactor.lastCompaction);
assertTrue("Compaction didn't complete", compactor.lastCompaction >= ts);
// only even rows should remain
ResultScanner scanner = table.getScanner(new Scan());
try {
- for (long i=2; i<=10; i+=2) {
+ for (long i = 2; i <= 10; i += 2) {
Result r = scanner.next();
assertNotNull(r);
assertFalse(r.isEmpty());
byte[] iBytes = Bytes.toBytes(i);
- assertArrayEquals("Row should be "+i, r.getRow(), iBytes);
- assertArrayEquals("Value should be "+i, r.getValue(A, A), iBytes);
+ assertArrayEquals("Row should be " + i, r.getRow(), iBytes);
+ assertArrayEquals("Value should be " + i, r.getValue(A, A), iBytes);
}
} finally {
scanner.close();
@@ -573,18 +535,16 @@ public class TestRegionObserverInterface {
table.close();
}
- @Test (timeout=300000)
+ @Test(timeout = 300000)
public void bulkLoadHFileTest() throws Exception {
- String testName = TestRegionObserverInterface.class.getName()+".bulkLoadHFileTest";
+ String testName = TestRegionObserverInterface.class.getName() + ".bulkLoadHFileTest";
TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".bulkLoadHFileTest");
Configuration conf = util.getConfiguration();
- Table table = util.createTable(tableName, new byte[][] {A, B, C});
+ Table table = util.createTable(tableName, new byte[][] { A, B, C });
try (RegionLocator locator = util.getConnection().getRegionLocator(tableName)) {
verifyMethodResult(SimpleRegionObserver.class,
- new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"},
- tableName,
- new Boolean[] {false, false}
- );
+ new String[] { "hadPreBulkLoadHFile", "hadPostBulkLoadHFile" }, tableName,
+ new Boolean[] { false, false });
FileSystem fs = util.getTestFileSystem();
final Path dir = util.getDataTestDirOnTestFS(testName).makeQualified(fs);
@@ -593,32 +553,30 @@ public class TestRegionObserverInterface {
createHFile(util.getConfiguration(), fs, new Path(familyDir, Bytes.toString(A)), A, A);
// Bulk load
- new LoadIncrementalHFiles(conf).doBulkLoad(dir, util.getHBaseAdmin(), table, locator);
+ new LoadIncrementalHFiles(conf).doBulkLoad(dir, util.getAdmin(), table, locator);
verifyMethodResult(SimpleRegionObserver.class,
- new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"},
- tableName,
- new Boolean[] {true, true}
- );
+ new String[] { "hadPreBulkLoadHFile", "hadPostBulkLoadHFile" }, tableName,
+ new Boolean[] { true, true });
} finally {
util.deleteTable(tableName);
table.close();
}
}
- @Test (timeout=300000)
+ @Test(timeout = 300000)
public void testRecovery() throws Exception {
- LOG.info(TestRegionObserverInterface.class.getName() +".testRecovery");
+ LOG.info(TestRegionObserverInterface.class.getName() + ".testRecovery");
TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRecovery");
- Table table = util.createTable(tableName, new byte[][] {A, B, C});
+ Table table = util.createTable(tableName, new byte[][] { A, B, C });
try (RegionLocator locator = util.getConnection().getRegionLocator(tableName)) {
JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer();
ServerName sn2 = rs1.getRegionServer().getServerName();
String regEN = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
- util.getHBaseAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
- while (!sn2.equals(locator.getAllRegionLocations().get(0).getServerName())){
+ util.getAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
+ while (!sn2.equals(locator.getAllRegionLocations().get(0).getServerName())) {
Thread.sleep(100);
}
@@ -632,18 +590,14 @@ public class TestRegionObserverInterface {
table.put(put);
verifyMethodResult(SimpleRegionObserver.class,
- new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
- "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete"},
- tableName,
- new Boolean[] {false, false, true, true, true, true, false}
- );
+ new String[] { "hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadPreBatchMutate",
+ "hadPostBatchMutate", "hadDelete" },
+ tableName, new Boolean[] { false, false, true, true, true, true, false });
verifyMethodResult(SimpleRegionObserver.class,
- new String[] {"getCtPreReplayWALs", "getCtPostReplayWALs", "getCtPreWALRestore",
- "getCtPostWALRestore", "getCtPrePut", "getCtPostPut",
- "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"},
- tableName,
- new Integer[] {0, 0, 0, 0, 2, 2, 0, 0});
+ new String[] { "getCtPreReplayWALs", "getCtPostReplayWALs", "getCtPreWALRestore",
+ "getCtPostWALRestore", "getCtPrePut", "getCtPostPut" },
+ tableName, new Integer[] { 0, 0, 0, 0, 2, 2 });
cluster.killRegionServer(rs1.getRegionServer().getServerName());
Threads.sleep(1000); // Let the kill soak in.
@@ -651,75 +605,16 @@ public class TestRegionObserverInterface {
LOG.info("All regions assigned");
verifyMethodResult(SimpleRegionObserver.class,
- new String[] {"getCtPreReplayWALs", "getCtPostReplayWALs", "getCtPreWALRestore",
- "getCtPostWALRestore", "getCtPrePut", "getCtPostPut",
- "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"},
- tableName,
- new Integer[]{1, 1, 2, 2, 0, 0, 0, 0});
+ new String[] { "getCtPreReplayWALs", "getCtPostReplayWALs", "getCtPreWALRestore",
+ "getCtPostWALRestore", "getCtPrePut", "getCtPostPut" },
+ tableName, new Integer[] { 1, 1, 2, 2, 0, 0 });
} finally {
util.deleteTable(tableName);
table.close();
}
}
- @Test (timeout=300000)
- public void testLegacyRecovery() throws Exception {
- LOG.info(TestRegionObserverInterface.class.getName() +".testLegacyRecovery");
- TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testLegacyRecovery");
- Table table = util.createTable(tableName, new byte[][] {A, B, C});
- try {
- try (RegionLocator locator = util.getConnection().getRegionLocator(tableName)) {
- JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer();
- ServerName sn2 = rs1.getRegionServer().getServerName();
- String regEN = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-
- util.getHBaseAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
- while (!sn2.equals(locator.getAllRegionLocations().get(0).getServerName())) {
- Thread.sleep(100);
- }
-
- Put put = new Put(ROW);
- put.addColumn(A, A, A);
- put.addColumn(B, B, B);
- put.addColumn(C, C, C);
- table.put(put);
-
- // put two times
- table.put(put);
-
- verifyMethodResult(SimpleRegionObserver.Legacy.class,
- new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
- "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete"},
- tableName,
- new Boolean[] {false, false, true, true, true, true, false}
- );
-
- verifyMethodResult(SimpleRegionObserver.Legacy.class,
- new String[] {"getCtPreReplayWALs", "getCtPostReplayWALs", "getCtPreWALRestore",
- "getCtPostWALRestore", "getCtPrePut", "getCtPostPut",
- "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"},
- tableName,
- new Integer[] {0, 0, 0, 0, 2, 2, 0, 0});
-
- cluster.killRegionServer(rs1.getRegionServer().getServerName());
- Threads.sleep(1000); // Let the kill soak in.
- util.waitUntilAllRegionsAssigned(tableName);
- LOG.info("All regions assigned");
-
- verifyMethodResult(SimpleRegionObserver.Legacy.class,
- new String[] {"getCtPreReplayWALs", "getCtPostReplayWALs", "getCtPreWALRestore",
- "getCtPostWALRestore", "getCtPrePut", "getCtPostPut",
- "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"},
- tableName,
- new Integer[]{1, 1, 2, 2, 0, 0, 2, 2});
- }
- } finally {
- util.deleteTable(tableName);
- table.close();
- }
- }
-
- @Test (timeout=300000)
+ @Test(timeout = 300000)
public void testPreWALRestoreSkip() throws Exception {
LOG.info(TestRegionObserverInterface.class.getName() + ".testPreWALRestoreSkip");
TableName tableName = TableName.valueOf(SimpleRegionObserver.TABLE_SKIPPED);
@@ -730,7 +625,7 @@ public class TestRegionObserverInterface {
ServerName sn2 = rs1.getRegionServer().getServerName();
String regEN = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
- util.getHBaseAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
+ util.getAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
while (!sn2.equals(locator.getAllRegionLocations().get(0).getServerName())) {
Thread.sleep(100);
}
@@ -746,10 +641,9 @@ public class TestRegionObserverInterface {
util.waitUntilAllRegionsAssigned(tableName);
}
- verifyMethodResult(SimpleRegionObserver.class, new String[] { "getCtPreWALRestore",
- "getCtPostWALRestore", "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"},
- tableName,
- new Integer[] {0, 0, 0, 0});
+ verifyMethodResult(SimpleRegionObserver.class,
+ new String[] { "getCtPreWALRestore", "getCtPostWALRestore", }, tableName,
+ new Integer[] { 0, 0 });
util.deleteTable(tableName);
table.close();
@@ -757,27 +651,28 @@ public class TestRegionObserverInterface {
// check each region whether the coprocessor upcalls are called or not.
private void verifyMethodResult(Class<?> c, String methodName[], TableName tableName,
- Object value[]) throws IOException {
+ Object value[]) throws IOException {
try {
for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) {
- if (!t.isAlive() || t.getRegionServer().isAborted() || t.getRegionServer().isStopping()){
+ if (!t.isAlive() || t.getRegionServer().isAborted() || t.getRegionServer().isStopping()) {
continue;
}
- for (HRegionInfo r : ProtobufUtil.getOnlineRegions(t.getRegionServer().getRSRpcServices())) {
+ for (HRegionInfo r : ProtobufUtil
+ .getOnlineRegions(t.getRegionServer().getRSRpcServices())) {
if (!r.getTable().equals(tableName)) {
continue;
}
- RegionCoprocessorHost cph = t.getRegionServer().getOnlineRegion(r.getRegionName()).
- getCoprocessorHost();
+ RegionCoprocessorHost cph =
+ t.getRegionServer().getOnlineRegion(r.getRegionName()).getCoprocessorHost();
Coprocessor cp = cph.findCoprocessor(c.getName());
assertNotNull(cp);
for (int i = 0; i < methodName.length; ++i) {
Method m = c.getMethod(methodName[i]);
Object o = m.invoke(cp);
- assertTrue("Result of " + c.getName() + "." + methodName[i]
- + " is expected to be " + value[i].toString()
- + ", while we get " + o.toString(), o.equals(value[i]));
+ assertTrue("Result of " + c.getName() + "." + methodName[i] + " is expected to be "
+ + value[i].toString() + ", while we get " + o.toString(),
+ o.equals(value[i]));
}
}
}
@@ -786,19 +681,16 @@ public class TestRegionObserverInterface {
}
}
- private static void createHFile(
- Configuration conf,
- FileSystem fs, Path path,
- byte[] family, byte[] qualifier) throws IOException {
+ private static void createHFile(Configuration conf, FileSystem fs, Path path, byte[] family,
+ byte[] qualifier) throws IOException {
HFileContext context = new HFileContextBuilder().build();
- HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
- .withPath(fs, path)
- .withFileContext(context)
- .create();
+ HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf)).withPath(fs, path)
+ .withFileContext(context).create();
long now = System.currentTimeMillis();
try {
- for (int i =1;i<=9;i++) {
- KeyValue kv = new KeyValue(Bytes.toBytes(i+""), family, qualifier, now, Bytes.toBytes(i+""));
+ for (int i = 1; i <= 9; i++) {
+ KeyValue kv =
+ new KeyValue(Bytes.toBytes(i + ""), family, qualifier, now, Bytes.toBytes(i + ""));
writer.append(kv);
}
} finally {
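As an aside on the setup change near the top of this file: with SimpleRegionObserver$Legacy gone, the mini cluster now registers a single observer. A minimal configuration sketch, mirroring the setStrings call in the first hunk (util is the test's HBaseTestingUtility):

    Configuration conf = util.getConfiguration();
    // Only the one observer class is registered; the $Legacy variant no longer exists.
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        SimpleRegionObserver.class.getName());
    util.startMiniCluster();
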
http://git-wip-us.apache.org/repos/asf/hbase/blob/47a4e343/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
index 3c591f8..6eca7f0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
-import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.security.User;
@@ -110,7 +109,7 @@ public class TestWALObserver {
public static void setupBeforeClass() throws Exception {
Configuration conf = TEST_UTIL.getConfiguration();
conf.setStrings(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
- SampleRegionWALObserver.class.getName(), SampleRegionWALObserver.Legacy.class.getName());
+ SampleRegionWALObserver.class.getName());
conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
SampleRegionWALObserver.class.getName());
conf.setInt("dfs.client.block.recovery.retries", 2);
@@ -169,17 +168,6 @@ public class TestWALObserver {
verifyWritesSeen(log, getCoprocessor(log, SampleRegionWALObserver.class), false);
}
- /**
- * Test WAL write behavior with WALObserver. The coprocessor monitors a
- * WALEdit written to WAL, and ignore, modify, and add KeyValue's for the
- * WALEdit.
- */
- @Test
- public void testLegacyWALObserverWriteToWAL() throws Exception {
- final WAL log = wals.getWAL(UNSPECIFIED_REGION, null);
- verifyWritesSeen(log, getCoprocessor(log, SampleRegionWALObserver.Legacy.class), true);
- }
-
private void verifyWritesSeen(final WAL log, final SampleRegionWALObserver cp,
final boolean seesLegacy) throws Exception {
HRegionInfo hri = createBasic3FamilyHRegionInfo(Bytes.toString(TEST_TABLE));
@@ -202,8 +190,6 @@ public class TestWALObserver {
assertFalse(cp.isPreWALWriteCalled());
assertFalse(cp.isPostWALWriteCalled());
- assertFalse(cp.isPreWALWriteDeprecatedCalled());
- assertFalse(cp.isPostWALWriteDeprecatedCalled());
// TEST_FAMILY[2] is not in the put, however it shall be added by the tested
// coprocessor.
@@ -241,7 +227,7 @@ public class TestWALObserver {
long now = EnvironmentEdgeManager.currentTime();
// we use HLogKey here instead of WALKey directly to support legacy coprocessors.
long txid = log.append(hri,
- new HLogKey(hri.getEncodedNameAsBytes(), hri.getTable(), now, scopes), edit, true);
+ new WALKey(hri.getEncodedNameAsBytes(), hri.getTable(), now, scopes), edit, true);
log.sync(txid);
// the edit shall have been change now by the coprocessor.
@@ -267,88 +253,6 @@ public class TestWALObserver {
assertTrue(cp.isPreWALWriteCalled());
assertTrue(cp.isPostWALWriteCalled());
- assertEquals(seesLegacy, cp.isPreWALWriteDeprecatedCalled());
- assertEquals(seesLegacy, cp.isPostWALWriteDeprecatedCalled());
- }
-
- @Test
- public void testNonLegacyWALKeysDoNotExplode() throws Exception {
- TableName tableName = TableName.valueOf(TEST_TABLE);
- final HTableDescriptor htd = createBasic3FamilyHTD(Bytes
- .toString(TEST_TABLE));
- final HRegionInfo hri = new HRegionInfo(tableName, null, null);
- MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
-
- fs.mkdirs(new Path(FSUtils.getTableDir(hbaseRootDir, tableName), hri.getEncodedName()));
-
- final Configuration newConf = HBaseConfiguration.create(this.conf);
-
- final WAL wal = wals.getWAL(UNSPECIFIED_REGION, null);
- final SampleRegionWALObserver newApi = getCoprocessor(wal, SampleRegionWALObserver.class);
- newApi.setTestValues(TEST_TABLE, TEST_ROW, null, null, null, null, null, null);
- final SampleRegionWALObserver oldApi = getCoprocessor(wal,
- SampleRegionWALObserver.Legacy.class);
- oldApi.setTestValues(TEST_TABLE, TEST_ROW, null, null, null, null, null, null);
-
- LOG.debug("ensuring wal entries haven't happened before we start");
- assertFalse(newApi.isPreWALWriteCalled());
- assertFalse(newApi.isPostWALWriteCalled());
- assertFalse(newApi.isPreWALWriteDeprecatedCalled());
- assertFalse(newApi.isPostWALWriteDeprecatedCalled());
- assertFalse(oldApi.isPreWALWriteCalled());
- assertFalse(oldApi.isPostWALWriteCalled());
- assertFalse(oldApi.isPreWALWriteDeprecatedCalled());
- assertFalse(oldApi.isPostWALWriteDeprecatedCalled());
-
- LOG.debug("writing to WAL with non-legacy keys.");
- NavigableMap<byte[], Integer> scopes = new TreeMap<byte[], Integer>(
- Bytes.BYTES_COMPARATOR);
- for (HColumnDescriptor hcd : htd.getFamilies()) {
- scopes.put(hcd.getName(), 0);
- }
- final int countPerFamily = 5;
- for (HColumnDescriptor hcd : htd.getFamilies()) {
- addWALEdits(tableName, hri, TEST_ROW, hcd.getName(), countPerFamily,
- EnvironmentEdgeManager.getDelegate(), wal, scopes, mvcc);
- }
-
- LOG.debug("Verify that only the non-legacy CP saw edits.");
- assertTrue(newApi.isPreWALWriteCalled());
- assertTrue(newApi.isPostWALWriteCalled());
- assertFalse(newApi.isPreWALWriteDeprecatedCalled());
- assertFalse(newApi.isPostWALWriteDeprecatedCalled());
- // wish we could test that the log message happened :/
- assertFalse(oldApi.isPreWALWriteCalled());
- assertFalse(oldApi.isPostWALWriteCalled());
- assertFalse(oldApi.isPreWALWriteDeprecatedCalled());
- assertFalse(oldApi.isPostWALWriteDeprecatedCalled());
-
- LOG.debug("reseting cp state.");
- newApi.setTestValues(TEST_TABLE, TEST_ROW, null, null, null, null, null, null);
- oldApi.setTestValues(TEST_TABLE, TEST_ROW, null, null, null, null, null, null);
-
- LOG.debug("write a log edit that supports legacy cps.");
- final long now = EnvironmentEdgeManager.currentTime();
- final WALKey legacyKey = new HLogKey(hri.getEncodedNameAsBytes(), hri.getTable(), now);
- final WALEdit edit = new WALEdit();
- final byte[] nonce = Bytes.toBytes("1772");
- edit.add(new KeyValue(TEST_ROW, TEST_FAMILY[0], nonce, now, nonce));
- final long txid = wal.append(hri, legacyKey, edit, true);
- wal.sync(txid);
-
- LOG.debug("Make sure legacy cps can see supported edits after having been skipped.");
- assertTrue("non-legacy WALObserver didn't see pre-write.", newApi.isPreWALWriteCalled());
- assertTrue("non-legacy WALObserver didn't see post-write.", newApi.isPostWALWriteCalled());
- assertFalse("non-legacy WALObserver shouldn't have seen legacy pre-write.",
- newApi.isPreWALWriteDeprecatedCalled());
- assertFalse("non-legacy WALObserver shouldn't have seen legacy post-write.",
- newApi.isPostWALWriteDeprecatedCalled());
- assertTrue("legacy WALObserver didn't see pre-write.", oldApi.isPreWALWriteCalled());
- assertTrue("legacy WALObserver didn't see post-write.", oldApi.isPostWALWriteCalled());
- assertTrue("legacy WALObserver didn't see legacy pre-write.",
- oldApi.isPreWALWriteDeprecatedCalled());
- assertTrue("legacy WALObserver didn't see legacy post-write.",
- oldApi.isPostWALWriteDeprecatedCalled());
}
/**
@@ -431,8 +335,8 @@ public class TestWALObserver {
User user = HBaseTestingUtility.getDifferentUser(newConf,
".replay.wal.secondtime");
- user.runAs(new PrivilegedExceptionAction() {
- public Object run() throws Exception {
+ user.runAs(new PrivilegedExceptionAction<Void>() {
+ public Void run() throws Exception {
Path p = runWALSplit(newConf);
LOG.info("WALSplit path == " + p);
FileSystem newFS = FileSystem.get(newConf);
@@ -450,8 +354,6 @@ public class TestWALObserver {
assertNotNull(cp2);
assertTrue(cp2.isPreWALRestoreCalled());
assertTrue(cp2.isPostWALRestoreCalled());
- assertFalse(cp2.isPreWALRestoreDeprecatedCalled());
- assertFalse(cp2.isPostWALRestoreDeprecatedCalled());
region.close();
wals2.close();
return null;
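With the legacy-key tests removed, the remaining TestWALObserver write path appends with a plain WALKey plus an explicit replication-scope map. A sketch under the same assumptions as the test (hri, htd, edit and log are set up as in the hunks above; scope 0 means local-only, i.e. not replicated):

    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (HColumnDescriptor hcd : htd.getFamilies()) {
      scopes.put(hcd.getName(), 0); // REPLICATION_SCOPE_LOCAL
    }
    long now = EnvironmentEdgeManager.currentTime();
    long txid = log.append(hri,
        new WALKey(hri.getEncodedNameAsBytes(), hri.getTable(), now, scopes), edit, true);
    log.sync(txid); // block until the edit is durable so the observer hooks have fired
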
http://git-wip-us.apache.org/repos/asf/hbase/blob/47a4e343/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHLogRecordReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHLogRecordReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHLogRecordReader.java
deleted file mode 100644
index 752faa6..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHLogRecordReader.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.mapreduce;
-
-import org.apache.hadoop.hbase.mapreduce.WALInputFormat.WALRecordReader;
-
-import java.util.NavigableMap;
-
-import org.apache.hadoop.hbase.mapreduce.HLogInputFormat.HLogKeyRecordReader;
-import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
-import org.apache.hadoop.hbase.wal.WALKey;
-import org.apache.hadoop.hbase.testclassification.MapReduceTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.junit.experimental.categories.Category;
-
-/**
- * JUnit tests for the record reader in HLogInputFormat
- */
-@Category({MapReduceTests.class, MediumTests.class})
-public class TestHLogRecordReader extends TestWALRecordReader {
-
- @Override
- protected WALKey getWalKey(final long time, NavigableMap<byte[], Integer> scopes) {
- return new HLogKey(info.getEncodedNameAsBytes(), tableName, time, mvcc, scopes);
- }
-
- @Override
- protected WALRecordReader getReader() {
- return new HLogKeyRecordReader();
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/47a4e343/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
index e34c9cd..5078038 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
@@ -89,7 +89,6 @@ import org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -101,9 +100,9 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-import org.apache.hadoop.hbase.wal.FSHLogProvider;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;
+import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.wal.WALSplitter;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -1308,8 +1307,8 @@ public class TestDistributedLogSplitting {
WALEdit e = new WALEdit();
value++;
e.add(new KeyValue(row, family, qualifier, timeStamp, Bytes.toBytes(value)));
- wal.append(curRegionInfo, new HLogKey(curRegionInfo.getEncodedNameAsBytes(), tableName,
- System.currentTimeMillis(), null), e, true);
+ wal.append(curRegionInfo, new WALKey(curRegionInfo.getEncodedNameAsBytes(), tableName,
+ System.currentTimeMillis()), e, true);
}
wal.sync();
wal.shutdown();
@@ -1403,7 +1402,7 @@ public class TestDistributedLogSplitting {
WALEdit e = new WALEdit();
value++;
e.add(new KeyValue(row, family, qualifier, timeStamp, Bytes.toBytes(value)));
- wal.append(curRegionInfo, new HLogKey(curRegionInfo.getEncodedNameAsBytes(),
+ wal.append(curRegionInfo, new WALKey(curRegionInfo.getEncodedNameAsBytes(),
tableName, System.currentTimeMillis()), e, true);
}
wal.sync();
@@ -1617,7 +1616,7 @@ public class TestDistributedLogSplitting {
byte[] qualifier = Bytes.toBytes("c" + Integer.toString(i));
e.add(new KeyValue(row, family, qualifier, System.currentTimeMillis(), value));
log.append(curRegionInfo,
- new HLogKey(curRegionInfo.getEncodedNameAsBytes(), fullTName,
+ new WALKey(curRegionInfo.getEncodedNameAsBytes(), fullTName,
System.currentTimeMillis()), e, true);
if (0 == i % syncEvery) {
log.sync();
http://git-wip-us.apache.org/repos/asf/hbase/blob/47a4e343/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java
index ad88cfe..73fb9cf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFailedAppendAndSync.java
@@ -150,7 +150,7 @@ public class TestFailedAppendAndSync {
}
@Override
- public long getLength() throws IOException {
+ public long getLength() {
return w.getLength();
}
};
http://git-wip-us.apache.org/repos/asf/hbase/blob/47a4e343/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 4c8b1de..bd1ec5c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -43,6 +43,10 @@ import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
import java.io.IOException;
import java.io.InterruptedIOException;
import java.security.PrivilegedExceptionAction;
@@ -127,19 +131,11 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
import org.apache.hadoop.hbase.regionserver.Region.RowLock;
import org.apache.hadoop.hbase.regionserver.TestStore.FaultyFileSystem;
import org.apache.hadoop.hbase.regionserver.handler.FinishRegionRecoveringHandler;
import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
import org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource;
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
@@ -147,6 +143,13 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
import org.apache.hadoop.hbase.test.MetricsAssertHelper;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests;
@@ -181,10 +184,6 @@ import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
/**
* Basic stand-alone testing of HRegion. No clusters!
*
@@ -695,7 +694,7 @@ public class TestHRegion {
WALEdit edit = new WALEdit();
edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes
.toBytes(i)));
- writer.append(new WAL.Entry(new HLogKey(regionName, tableName, i, time,
+ writer.append(new WAL.Entry(new WALKey(regionName, tableName, i, time,
HConstants.DEFAULT_CLUSTER_ID), edit));
writer.close();
@@ -746,7 +745,7 @@ public class TestHRegion {
WALEdit edit = new WALEdit();
edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes
.toBytes(i)));
- writer.append(new WAL.Entry(new HLogKey(regionName, tableName, i, time,
+ writer.append(new WAL.Entry(new WALKey(regionName, tableName, i, time,
HConstants.DEFAULT_CLUSTER_ID), edit));
writer.close();
@@ -849,7 +848,7 @@ public class TestHRegion {
edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes
.toBytes(i)));
}
- writer.append(new WAL.Entry(new HLogKey(regionName, tableName, i, time,
+ writer.append(new WAL.Entry(new WALKey(regionName, tableName, i, time,
HConstants.DEFAULT_CLUSTER_ID), edit));
writer.close();
}
@@ -946,7 +945,7 @@ public class TestHRegion {
long time = System.nanoTime();
- writer.append(new WAL.Entry(new HLogKey(regionName, tableName, 10, time,
+ writer.append(new WAL.Entry(new WALKey(regionName, tableName, 10, time,
HConstants.DEFAULT_CLUSTER_ID), WALEdit.createCompaction(region.getRegionInfo(),
compactionDescriptor)));
writer.close();
@@ -1187,7 +1186,7 @@ public class TestHRegion {
}
@Override
- public long getLength() throws IOException {
+ public long getLength() {
return w.getLength();
}
};
http://git-wip-us.apache.org/repos/asf/hbase/blob/47a4e343/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
index 31f9a42..188016c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
@@ -190,7 +190,7 @@ public class TestWALLockup {
}
@Override
- public long getLength() throws IOException {
+ public long getLength() {
return w.getLength();
}
};
@@ -228,7 +228,7 @@ public class TestWALLockup {
Put put = new Put(bytes);
put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("1"), bytes);
WALKey key = new WALKey(region.getRegionInfo().getEncodedNameAsBytes(), htd.getTableName(),
- scopes);
+ System.currentTimeMillis(), scopes);
WALEdit edit = new WALEdit();
CellScanner CellScanner = put.cellScanner();
assertTrue(CellScanner.advance());
@@ -349,7 +349,7 @@ public class TestWALLockup {
}
@Override
- public long getLength() throws IOException {
+ public long getLength() {
return w.getLength();
}
};
@@ -403,8 +403,8 @@ public class TestWALLockup {
try {
Put put = new Put(bytes);
put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("1"), bytes);
- WALKey key = new WALKey(region.getRegionInfo().getEncodedNameAsBytes(),
- htd.getTableName(), scopes);
+ WALKey key = new WALKey(region.getRegionInfo().getEncodedNameAsBytes(), htd.getTableName(),
+ System.currentTimeMillis(), scopes);
WALEdit edit = new WALEdit();
CellScanner CellScanner = put.cellScanner();
assertTrue(CellScanner.advance());
@@ -435,8 +435,8 @@ public class TestWALLockup {
// make RingBufferEventHandler sleep 1s, so the following sync
// endOfBatch=false
- key = new WALKey(region.getRegionInfo().getEncodedNameAsBytes(),
- TableName.valueOf("sleep"), scopes);
+ key = new WALKey(region.getRegionInfo().getEncodedNameAsBytes(), TableName.valueOf("sleep"),
+ System.currentTimeMillis(), scopes);
dodgyWAL2.append(region.getRegionInfo(), key, edit, true);
Thread t = new Thread("Sync") {
@@ -460,7 +460,7 @@ public class TestWALLockup {
}
// make append throw DamagedWALException
key = new WALKey(region.getRegionInfo().getEncodedNameAsBytes(),
- TableName.valueOf("DamagedWALException"), scopes);
+ TableName.valueOf("DamagedWALException"), System.currentTimeMillis(), scopes);
dodgyWAL2.append(region.getRegionInfo(), key, edit, true);
while (latch.getCount() > 0) {
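The TestWALLockup hunks all make the same adjustment: each WALKey is now built with an explicit System.currentTimeMillis() write time, presumably because the timestamp-less overload was among the deprecated constructors retired elsewhere in this commit. The pattern, with names as in the hunks above:

    WALKey key = new WALKey(region.getRegionInfo().getEncodedNameAsBytes(), htd.getTableName(),
        System.currentTimeMillis(), scopes);
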
http://git-wip-us.apache.org/repos/asf/hbase/blob/47a4e343/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultyProtobufLogReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultyProtobufLogReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultyProtobufLogReader.java
new file mode 100644
index 0000000..c654c16
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultyProtobufLogReader.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.Queue;
+
+import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.hadoop.hbase.wal.WALKey;
+
+public class FaultyProtobufLogReader extends ProtobufLogReader {
+
+ // public until class relocates to o.a.h.h.wal
+ public enum FailureType {
+ BEGINNING, MIDDLE, END, NONE
+ }
+
+ Queue<Entry> nextQueue = new LinkedList<Entry>();
+ int numberOfFileEntries = 0;
+
+ FailureType getFailureType() {
+ return FailureType.valueOf(conf.get("faultyprotobuflogreader.failuretype", "NONE"));
+ }
+
+ @Override
+ public Entry next(Entry reuse) throws IOException {
+ if (nextQueue.isEmpty()) { // Read the whole thing at once and fake reading
+ boolean b;
+ do {
+ Entry e = new Entry(new WALKey(), new WALEdit());
+ if (compressionContext != null) {
+ e.setCompressionContext(compressionContext);
+ }
+ b = readNext(e);
+ nextQueue.offer(e);
+ numberOfFileEntries++;
+ } while (b);
+ }
+
+ if (nextQueue.size() == this.numberOfFileEntries && getFailureType() == FailureType.BEGINNING) {
+ throw new IOException("fake Exception");
+ } else if (nextQueue.size() == this.numberOfFileEntries / 2
+ && getFailureType() == FailureType.MIDDLE) {
+ throw new IOException("fake Exception");
+ } else if (nextQueue.size() == 1 && getFailureType() == FailureType.END) {
+ throw new IOException("fake Exception");
+ }
+
+ if (nextQueue.peek() != null) {
+ edit++;
+ }
+
+ Entry e = nextQueue.poll();
+
+ if (e.getEdit().isEmpty()) {
+ return null;
+ }
+ return e;
+ }
+}
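The new reader's failure point is driven purely by configuration, so a split/replay test can choose where the fake IOException fires. A hypothetical wiring sketch (the failuretype property key is defined in the class above; using hbase.regionserver.hlog.reader.impl to install it as the WAL reader is an assumption about the surrounding test harness):

    Configuration conf = HBaseConfiguration.create();
    // Route WAL reads through the faulty reader...
    conf.set("hbase.regionserver.hlog.reader.impl", FaultyProtobufLogReader.class.getName());
    // ...and make it throw halfway through the file (BEGINNING, MIDDLE, END or NONE).
    conf.set("faultyprotobuflogreader.failuretype", "MIDDLE");
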
http://git-wip-us.apache.org/repos/asf/hbase/blob/47a4e343/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java
deleted file mode 100644
index a0e4490..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver.wal;
-
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.Queue;
-
-import org.apache.hadoop.hbase.wal.WAL.Entry;
-
-public class FaultySequenceFileLogReader extends SequenceFileLogReader {
-
- // public until class relocates to o.a.h.h.wal
- public enum FailureType {
- BEGINNING, MIDDLE, END, NONE
- }
-
- Queue<Entry> nextQueue = new LinkedList<Entry>();
- int numberOfFileEntries = 0;
-
- FailureType getFailureType() {
- return FailureType.valueOf(conf.get("faultysequencefilelogreader.failuretype", "NONE"));
- }
-
- @Override
- public Entry next(Entry reuse) throws IOException {
- this.entryStart = this.getPosition();
- boolean b = true;
-
- if (nextQueue.isEmpty()) { // Read the whole thing at once and fake reading
- while (b == true) {
- Entry e = new Entry(new HLogKey(), new WALEdit());
- if (compressionContext != null) {
- e.setCompressionContext(compressionContext);
- }
- b = readNext(e);
- nextQueue.offer(e);
- numberOfFileEntries++;
- }
- }
-
- if (nextQueue.size() == this.numberOfFileEntries
- && getFailureType() == FailureType.BEGINNING) {
- throw this.addFileInfoToException(new IOException("fake Exception"));
- } else if (nextQueue.size() == this.numberOfFileEntries / 2
- && getFailureType() == FailureType.MIDDLE) {
- throw this.addFileInfoToException(new IOException("fake Exception"));
- } else if (nextQueue.size() == 1 && getFailureType() == FailureType.END) {
- throw this.addFileInfoToException(new IOException("fake Exception"));
- }
-
- if (nextQueue.peek() != null) {
- edit++;
- }
-
- Entry e = nextQueue.poll();
-
- if (e.getEdit().isEmpty()) {
- return null;
- }
- return e;
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/47a4e343/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java
deleted file mode 100644
index 101758e..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java
+++ /dev/null
@@ -1,239 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver.wal;
-
-import java.io.IOException;
-import java.lang.reflect.Field;
-import java.lang.reflect.InvocationTargetException;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.io.util.LRUDictionary;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.wal.FSHLogProvider;
-import org.apache.hadoop.hbase.wal.WAL;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.SequenceFile.CompressionType;
-import org.apache.hadoop.io.SequenceFile.Metadata;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.compress.CompressionCodec;
-import org.apache.hadoop.io.compress.DefaultCodec;
-
-/**
- * Implementation of {@link WALProvider.Writer} that delegates to
- * SequenceFile.Writer. Legacy implementation only used for compat tests.
- *
- * Note that because this class writes to the legacy hadoop-specific SequenceFile
- * format, users of it must write {@link HLogKey} keys and not arbitrary
- * {@link WALKey}s because the latter are not Writables (nor made to work with
- * Hadoop serialization).
- */
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
-public class SequenceFileLogWriter implements FSHLogProvider.Writer {
- private static final Log LOG = LogFactory.getLog(SequenceFileLogWriter.class);
- // The sequence file we delegate to.
- private SequenceFile.Writer writer;
- // This is the FSDataOutputStream instance that is the 'out' instance
- // in the SequenceFile.Writer 'writer' instance above.
- private FSDataOutputStream writer_out;
-
- private CompressionContext compressionContext;
-
- // Legacy stuff from pre-PB WAL metadata.
- private static final Text WAL_VERSION_KEY = new Text("version");
- private static final Text WAL_COMPRESSION_TYPE_KEY = new Text("compression.type");
- private static final Text DICTIONARY_COMPRESSION_TYPE = new Text("dictionary");
-
- /**
- * Default constructor.
- */
- public SequenceFileLogWriter() {
- super();
- }
- /**
- * Create sequence file Metadata for our WAL file with version and compression
- * type (if any).
- * @param conf
- * @param compress
- * @return Metadata instance.
- */
- private static Metadata createMetadata(final Configuration conf,
- final boolean compress) {
- TreeMap<Text, Text> metaMap = new TreeMap<Text, Text>();
- metaMap.put(WAL_VERSION_KEY, new Text("1"));
- if (compress) {
- // Currently we only do one compression type.
- metaMap.put(WAL_COMPRESSION_TYPE_KEY, DICTIONARY_COMPRESSION_TYPE);
- }
- return new Metadata(metaMap);
- }
-
- private boolean initializeCompressionContext(Configuration conf, Path path) throws IOException {
- boolean doCompress = conf.getBoolean(HConstants.ENABLE_WAL_COMPRESSION, false);
- if (doCompress) {
- try {
- this.compressionContext = new CompressionContext(LRUDictionary.class,
- FSUtils.isRecoveredEdits(path), conf.getBoolean(
- CompressionContext.ENABLE_WAL_TAGS_COMPRESSION, true));
- } catch (Exception e) {
- throw new IOException("Failed to initiate CompressionContext", e);
- }
- }
- return doCompress;
- }
-
- @Override
- public void init(FileSystem fs, Path path, Configuration conf, boolean overwritable)
- throws IOException {
- boolean compress = initializeCompressionContext(conf, path);
- // Create a SF.Writer instance.
- try {
- // reflection for a version of SequenceFile.createWriter that doesn't
- // automatically create the parent directory (see HBASE-2312)
- this.writer = (SequenceFile.Writer) SequenceFile.class
- .getMethod("createWriter", new Class[] {FileSystem.class,
- Configuration.class, Path.class, Class.class, Class.class,
- Integer.TYPE, Short.TYPE, Long.TYPE, Boolean.TYPE,
- CompressionType.class, CompressionCodec.class, Metadata.class})
- .invoke(null, new Object[] {fs, conf, path, HLogKey.class, WALEdit.class,
- Integer.valueOf(FSUtils.getDefaultBufferSize(fs)),
- Short.valueOf((short)
- conf.getInt("hbase.regionserver.hlog.replication",
- FSUtils.getDefaultReplication(fs, path))),
- Long.valueOf(conf.getLong("hbase.regionserver.hlog.blocksize",
- FSUtils.getDefaultBlockSize(fs, path))),
- Boolean.valueOf(false) /*createParent*/,
- SequenceFile.CompressionType.NONE, new DefaultCodec(),
- createMetadata(conf, compress)
- });
- } catch (InvocationTargetException ite) {
- // function was properly called, but threw its own exception
- throw new IOException(ite.getCause());
- } catch (Exception e) {
- // ignore all other exceptions related to reflection failure
- }
-
- // if reflection failed, use the old createWriter
- if (this.writer == null) {
- LOG.debug("new createWriter -- HADOOP-6840 -- not available");
- this.writer = SequenceFile.createWriter(fs, conf, path,
- HLogKey.class, WALEdit.class,
- FSUtils.getDefaultBufferSize(fs),
- (short) conf.getInt("hbase.regionserver.hlog.replication",
- FSUtils.getDefaultReplication(fs, path)),
- conf.getLong("hbase.regionserver.hlog.blocksize",
- FSUtils.getDefaultBlockSize(fs, path)),
- SequenceFile.CompressionType.NONE,
- new DefaultCodec(),
- null,
- createMetadata(conf, compress));
- } else {
- if (LOG.isTraceEnabled()) LOG.trace("Using new createWriter -- HADOOP-6840");
- }
-
- this.writer_out = getSequenceFilePrivateFSDataOutputStreamAccessible();
- if (LOG.isTraceEnabled()) LOG.trace("Path=" + path + ", compression=" + compress);
- }
-
- // Get at the private FSDataOutputStream inside SequenceFile so we can
- // call sync on it. Make it accessible.
- private FSDataOutputStream getSequenceFilePrivateFSDataOutputStreamAccessible()
- throws IOException {
- FSDataOutputStream out = null;
- final Field fields [] = this.writer.getClass().getDeclaredFields();
- final String fieldName = "out";
- for (int i = 0; i < fields.length; ++i) {
- if (fieldName.equals(fields[i].getName())) {
- try {
- // Make the 'out' field up in SF.Writer accessible.
- fields[i].setAccessible(true);
- out = (FSDataOutputStream)fields[i].get(this.writer);
- break;
- } catch (IllegalAccessException ex) {
- throw new IOException("Accessing " + fieldName, ex);
- } catch (SecurityException e) {
- LOG.warn("Does not have access to out field from FSDataOutputStream",
- e);
- }
- }
- }
- return out;
- }
-
- @Override
- public void append(WAL.Entry entry) throws IOException {
- entry.setCompressionContext(compressionContext);
- try {
- this.writer.append(entry.getKey(), entry.getEdit());
- } catch (NullPointerException npe) {
- // Concurrent close...
- throw new IOException(npe);
- }
- }
-
- @Override
- public void close() throws IOException {
- if (this.writer != null) {
- try {
- this.writer.close();
- } catch (NullPointerException npe) {
- // Can get an NPE coming up from down in DFSClient$DFSOutputStream#close
- LOG.warn(npe);
- }
- this.writer = null;
- }
- }
-
- @Override
- public void sync() throws IOException {
- try {
- this.writer.syncFs();
- } catch (NullPointerException npe) {
- // Concurrent close...
- throw new IOException(npe);
- }
- }
-
- @Override
- public long getLength() throws IOException {
- try {
- return this.writer.getLength();
- } catch (NullPointerException npe) {
- // Concurrent close...
- throw new IOException(npe);
- }
- }
-
- /**
- * @return The dfsclient out stream up inside SF.Writer made accessible, or
- * null if not available.
- */
- public FSDataOutputStream getWriterFSDataOutputStream() {
- return this.writer_out;
- }
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/47a4e343/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestReadOldRootAndMetaEdits.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestReadOldRootAndMetaEdits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestReadOldRootAndMetaEdits.java
deleted file mode 100644
index 8bdb33c..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestReadOldRootAndMetaEdits.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver.wal;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.testclassification.RegionServerTests;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.wal.WAL;
-import org.apache.hadoop.hbase.wal.WALFactory;
-import org.apache.hadoop.hbase.wal.WALKey;
-import org.apache.hadoop.hbase.wal.WALProvider;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * Tests reading old ROOT and Meta edits.
- */
-@Category({RegionServerTests.class, MediumTests.class})
-
-public class TestReadOldRootAndMetaEdits {
-
- private final static Log LOG = LogFactory.getLog(TestReadOldRootAndMetaEdits.class);
- private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
- private static Configuration conf;
- private static FileSystem fs;
- private static Path dir;
-
- @BeforeClass
- public static void setupBeforeClass() throws Exception {
- conf = TEST_UTIL.getConfiguration();
- conf.setClass("hbase.regionserver.hlog.writer.impl",
- SequenceFileLogWriter.class, WALProvider.Writer.class);
- fs = TEST_UTIL.getTestFileSystem();
- dir = new Path(TEST_UTIL.createRootDir(), "testReadOldRootAndMetaEdits");
- fs.mkdirs(dir);
-
- }
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- }
-
- /**
- * Inserts three waledits into the wal file and reads them back. The first waledit is for a
- * regular table, the second is for the ROOT table (it will be ignored while reading), and the
- * last is for the hbase:meta table, which will be linked to the new system:meta table.
- * @throws IOException
- */
- @Test
- public void testReadOldRootAndMetaEdits() throws IOException {
- LOG.debug("testReadOldRootAndMetaEdits");
- // kv list to be used for all WALEdits.
- byte[] row = Bytes.toBytes("row");
- KeyValue kv = new KeyValue(row, row, row, row);
- List<KeyValue> kvs = new ArrayList<KeyValue>();
- kvs.add(kv);
-
- WALProvider.Writer writer = null;
- WAL.Reader reader = null;
- // a regular table
- TableName t = TableName.valueOf("t");
- HRegionInfo tRegionInfo = null;
- int logCount = 0;
- long timestamp = System.currentTimeMillis();
- Path path = new Path(dir, "t");
- try {
- tRegionInfo = new HRegionInfo(t, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
- WAL.Entry tEntry = createAEntry(new HLogKey(tRegionInfo.getEncodedNameAsBytes(), t,
- ++logCount, timestamp, HConstants.DEFAULT_CLUSTER_ID), kvs);
-
- // create a old root edit (-ROOT-).
- WAL.Entry rootEntry = createAEntry(new HLogKey(Bytes.toBytes(TableName.OLD_ROOT_STR),
- TableName.OLD_ROOT_TABLE_NAME, ++logCount, timestamp,
- HConstants.DEFAULT_CLUSTER_ID), kvs);
-
- // create a old meta edit (hbase:meta).
- WAL.Entry oldMetaEntry = createAEntry(new HLogKey(Bytes.toBytes(TableName.OLD_META_STR),
- TableName.OLD_META_TABLE_NAME, ++logCount, timestamp,
- HConstants.DEFAULT_CLUSTER_ID), kvs);
-
- // write above entries
- writer = WALFactory.createWALWriter(fs, path, conf);
- writer.append(tEntry);
- writer.append(rootEntry);
- writer.append(oldMetaEntry);
-
- // sync/close the writer
- writer.sync();
- writer.close();
-
- // read the log and see things are okay.
- reader = WALFactory.createReader(fs, path, conf);
- WAL.Entry entry = reader.next();
- assertNotNull(entry);
- assertTrue(entry.getKey().getTablename().equals(t));
- assertEquals(Bytes.toString(entry.getKey().getEncodedRegionName()),
- Bytes.toString(tRegionInfo.getEncodedNameAsBytes()));
-
- // read the ROOT waledit, but that will be ignored, and the hbase:meta waledit will be read instead.
- entry = reader.next();
- assertEquals(entry.getKey().getTablename(), TableName.META_TABLE_NAME);
- // should reach end of log
- assertNull(reader.next());
- } finally {
- if (writer != null) {
- writer.close();
- }
- if (reader != null) {
- reader.close();
- }
- }
-}
- /**
- * Creates a WALEdit for the passed KeyValues and returns a WAL.Entry instance composed of
- * the WALEdit and the passed WALKey.
- * @return WAL.Entry instance for the passed WALKey and KeyValues
- */
- private WAL.Entry createAEntry(WALKey walKey, List<KeyValue> kvs) {
- WALEdit edit = new WALEdit();
- for (KeyValue kv : kvs)
- edit.add(kv);
- return new WAL.Entry(walKey, edit);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hbase/blob/47a4e343/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
index 3d4062f..2dbacaf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
@@ -18,6 +18,12 @@
package org.apache.hadoop.hbase.replication;
+import static org.junit.Assert.assertNull;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.google.common.collect.Lists;
+
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -30,22 +36,17 @@ import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WAL.Entry;
import org.apache.hadoop.hbase.wal.WALKey;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Assert;
import org.junit.Test;
import org.junit.experimental.categories.Category;
-import com.google.common.collect.Lists;
-
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.*;
-
@Category({ReplicationTests.class, SmallTests.class})
public class TestReplicationWALEntryFilters {
@@ -59,20 +60,21 @@ public class TestReplicationWALEntryFilters {
SystemTableWALEntryFilter filter = new SystemTableWALEntryFilter();
// meta
- WALKey key1 = new WALKey( HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
- TableName.META_TABLE_NAME, null);
+ WALKey key1 = new WALKey(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
+ TableName.META_TABLE_NAME, System.currentTimeMillis());
Entry metaEntry = new Entry(key1, null);
assertNull(filter.filter(metaEntry));
// ns table
- WALKey key2 = new WALKey(new byte[] {}, TableName.NAMESPACE_TABLE_NAME, null);
+ WALKey key2 =
+ new WALKey(new byte[0], TableName.NAMESPACE_TABLE_NAME, System.currentTimeMillis());
Entry nsEntry = new Entry(key2, null);
assertNull(filter.filter(nsEntry));
// user table
- WALKey key3 = new WALKey(new byte[] {}, TableName.valueOf("foo"), null);
+ WALKey key3 = new WALKey(new byte[0], TableName.valueOf("foo"), System.currentTimeMillis());
Entry userEntry = new Entry(key3, null);
assertEquals(userEntry, filter.filter(userEntry));
@@ -298,7 +300,8 @@ public class TestReplicationWALEntryFilters {
}
private Entry createEntry(TreeMap<byte[], Integer> scopes, byte[]... kvs) {
- WALKey key1 = new WALKey(new byte[] {}, TableName.valueOf("foo"), scopes);
+ WALKey key1 =
+ new WALKey(new byte[0], TableName.valueOf("foo"), System.currentTimeMillis(), scopes);
WALEdit edit1 = new WALEdit();
for (byte[] kv : kvs) {