You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by zg...@apache.org on 2020/08/26 03:59:04 UTC
[hbase] branch branch-2.2 updated: HBASE-23814 Add null checks and
logging to misc set of tests
This is an automated email from the ASF dual-hosted git repository.
zghao pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2.2 by this push:
new 147e4ff HBASE-23814 Add null checks and logging to misc set of tests
147e4ff is described below
commit 147e4ff6a73a63e71f4167b2da9dee0e1760a125
Author: stack <st...@apache.org>
AuthorDate: Fri Feb 7 15:11:11 2020 -0800
HBASE-23814 Add null checks and logging to misc set of tests
Signed-off-by: Nick Dimiduk <nd...@apache.org>
Signed-off-by: Zach York <zy...@apache.org>
* TestFullLogReconstruction log the server we've chosen to expire and then note where we start counting rows
* TestAsyncTableScanException use a define for row counts
* TestRawAsyncTableLimitedScanWithFilter check connection was made before closing it in tearDown
* TestLogsCleaner use a single mod time. Make sure it is less than 'now' in case the test runs entirely within the same millisecond (which would cause the test to fail)
* TestReplicationBase test table is non-null before closing in tearDown
---
.../hbase/replication/master/ReplicationLogCleaner.java | 4 ++--
.../org/apache/hadoop/hbase/TestFullLogReconstruction.java | 8 +++++++-
.../hbase/client/TestAsyncTableRegionReplicasScan.java | 2 +-
.../hadoop/hbase/client/TestAsyncTableScanException.java | 8 +++++---
.../hbase/client/TestRawAsyncTableLimitedScanWithFilter.java | 4 +++-
.../apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java | 7 +++++--
.../apache/hadoop/hbase/replication/TestReplicationBase.java | 12 +++++++++---
.../java/org/apache/hadoop/hbase/tool/TestCanaryTool.java | 2 +-
8 files changed, 33 insertions(+), 14 deletions(-)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
index caca968..e51f5ef 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
@@ -84,8 +84,8 @@ public class ReplicationLogCleaner extends BaseLogCleanerDelegate {
}
String wal = file.getPath().getName();
boolean logInReplicationQueue = wals.contains(wal);
- if (logInReplicationQueue) {
- LOG.debug("Found up in ZooKeeper, NOT deleting={}", wal);
+ if (logInReplicationQueue) {
+ LOG.debug("Found up in ZooKeeper, NOT deleting={}", wal);
}
return !logInReplicationQueue && (file.getModificationTime() < readZKTimestamp);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFullLogReconstruction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFullLogReconstruction.java
index 87152fc..9cbd8bc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFullLogReconstruction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFullLogReconstruction.java
@@ -31,9 +31,12 @@ import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
@Category({ MiscTests.class, LargeTests.class })
public class TestFullLogReconstruction {
+ private static final Logger LOG = LoggerFactory.getLogger(TestFullLogReconstruction.class);
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
@@ -82,7 +85,9 @@ public class TestFullLogReconstruction {
TEST_UTIL.loadTable(table, FAMILY);
}
RegionServerThread rsThread = TEST_UTIL.getHBaseCluster().getRegionServerThreads().get(0);
- TEST_UTIL.expireRegionServerSession(0);
+ int index = 0;
+ LOG.info("Expiring {}", TEST_UTIL.getMiniHBaseCluster().getRegionServer(index));
+ TEST_UTIL.expireRegionServerSession(index);
// make sure that the RS is fully down before reading, so that we will read the data from other
// RSes.
TEST_UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {
@@ -97,6 +102,7 @@ public class TestFullLogReconstruction {
return rsThread.getRegionServer() + " is still alive";
}
});
+ LOG.info("Starting count");
int newCount = TEST_UTIL.countRows(table);
assertEquals(count, newCount);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasScan.java
index dd5c8e5..bd0f00c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasScan.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasScan.java
@@ -66,7 +66,7 @@ public class TestAsyncTableRegionReplicasScan extends AbstractTestAsyncTableRegi
scan.setReplicaId(replicaId);
}
try (ResultScanner scanner = table.getScanner(scan)) {
- for (int i = 0; i < 1000; i++) {
+ for (int i = 0; i < ROW_COUNT; i++) {
Result result = scanner.next();
assertNotNull(result);
assertArrayEquals(getValue(i), result.getValue(FAMILY, QUALIFIER));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanException.java
index a1715cc..96ec86b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanException.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableScanException.java
@@ -74,6 +74,8 @@ public class TestAsyncTableScanException {
private static volatile boolean DO_NOT_RETRY;
+ private static final int ROW_COUNT = 100;
+
public static final class ErrorCP implements RegionObserver, RegionCoprocessor {
@Override
@@ -99,13 +101,13 @@ public class TestAsyncTableScanException {
@BeforeClass
public static void setUp() throws Exception {
- UTIL.startMiniCluster(3);
+ UTIL.startMiniCluster(1);
UTIL.getAdmin()
.createTable(TableDescriptorBuilder.newBuilder(TABLE_NAME)
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY))
.setCoprocessor(ErrorCP.class.getName()).build());
try (Table table = UTIL.getConnection().getTable(TABLE_NAME)) {
- for (int i = 0; i < 100; i++) {
+ for (int i = 0; i < ROW_COUNT; i++) {
table.put(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUAL, Bytes.toBytes(i)));
}
}
@@ -151,7 +153,7 @@ public class TestAsyncTableScanException {
private void count() throws IOException {
try (ResultScanner scanner = CONN.getTable(TABLE_NAME).getScanner(new Scan().setCaching(1))) {
- for (int i = 0; i < 100; i++) {
+ for (int i = 0; i < ROW_COUNT; i++) {
Result result = scanner.next();
assertArrayEquals(Bytes.toBytes(i), result.getRow());
assertArrayEquals(Bytes.toBytes(i), result.getValue(FAMILY, QUAL));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableLimitedScanWithFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableLimitedScanWithFilter.java
index 61eb440..3ab9b14 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableLimitedScanWithFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableLimitedScanWithFilter.java
@@ -81,7 +81,9 @@ public class TestRawAsyncTableLimitedScanWithFilter {
@AfterClass
public static void tearDown() throws Exception {
- CONN.close();
+ if (CONN != null) {
+ CONN.close();
+ }
UTIL.shutdownMiniCluster();
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
index 32e6635..b3d78fc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
@@ -267,9 +267,12 @@ public class TestLogsCleaner {
public void testZooKeeperNormal() throws Exception {
ReplicationLogCleaner cleaner = new ReplicationLogCleaner();
+ // Subtract 1000 from current time so modtime is for sure older
+ // than 'now'.
+ long modTime = System.currentTimeMillis() - 1000;
List<FileStatus> dummyFiles = Arrays.asList(
- new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("log1")),
- new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("log2"))
+ new FileStatus(100, false, 3, 100, modTime, new Path("log1")),
+ new FileStatus(100, false, 3, 100, modTime, new Path("log2"))
);
ZKWatcher zkw = new ZKWatcher(conf, "testZooKeeperAbort-normal", null);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
index caf0c96..3004072 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
@@ -322,9 +322,15 @@ public class TestReplicationBase {
@AfterClass
public static void tearDownAfterClass() throws Exception {
- htable2.close();
- htable1.close();
- admin.close();
+ if (htable2 != null) {
+ htable2.close();
+ }
+ if (htable1 != null) {
+ htable1.close();
+ }
+ if (admin != null) {
+ admin.close();
+ }
UTIL2.shutdownMiniCluster();
UTIL1.shutdownMiniCluster();
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java
index 8aa8d27..72e35b3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java
@@ -271,7 +271,7 @@ public class TestCanaryTool {
verify(mockAppender, times(1)).doAppend(argThat(new ArgumentMatcher<LoggingEvent>() {
@Override
public boolean matches(LoggingEvent argument) {
- return ((LoggingEvent) argument).getRenderedMessage().contains("exceeded the configured read timeout.");
+ return argument.getRenderedMessage().contains("exceeded the configured read timeout.");
}
}));
verify(mockAppender, times(2)).doAppend(argThat(new ArgumentMatcher<LoggingEvent>() {