Posted to commits@hbase.apache.org by zh...@apache.org on 2020/06/06 11:06:26 UTC
[hbase] branch master updated: HBASE-24510 Remove HBaseTestCase and GenericTestUtils (#1859)
This is an automated email from the ASF dual-hosted git repository.
zhangduo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/master by this push:
new 16116fa HBASE-24510 Remove HBaseTestCase and GenericTestUtils (#1859)
16116fa is described below
commit 16116fa35e2e4a8e1728b3f31dd4dbb06cbd3ba5
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Sat Jun 6 19:06:11 2020 +0800
HBASE-24510 Remove HBaseTestCase and GenericTestUtils (#1859)
Signed-off-by: Michael Stack <st...@apache.org>
---
.../org/apache/hadoop/hbase/GenericTestUtils.java | 321 --------------
.../org/apache/hadoop/hbase/HBaseTestCase.java | 459 ---------------------
.../java/org/apache/hadoop/hbase/HTestConst.java | 135 +++++-
.../org/apache/hadoop/hbase/client/TestResult.java | 6 +-
.../coprocessor/TestCoprocessorInterface.java | 6 +-
.../hbase/regionserver/TestBlocksScanned.java | 3 +-
.../hadoop/hbase/regionserver/TestCompaction.java | 12 +-
.../hadoop/hbase/regionserver/TestHStoreFile.java | 410 ++++++++----------
.../hbase/regionserver/TestKeyValueHeap.java | 154 ++++---
.../hbase/regionserver/TestMajorCompaction.java | 111 +++--
.../hbase/regionserver/TestMinorCompaction.java | 11 +-
.../hadoop/hbase/regionserver/TestScanner.java | 20 +-
.../hadoop/hbase/regionserver/TestWideScanner.java | 100 +++--
13 files changed, 520 insertions(+), 1228 deletions(-)
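Most of the churn below is mechanical: call sites that reached the static addContent helpers through the deprecated HBaseTestCase now call the copies moved into HTestConst, and the last JUnit3-style tests are rewritten as plain JUnit4 tests on top of HBaseTestingUtility. A minimal sketch of the call-site migration, assuming an already-created HRegion (the method and variable names are illustrative, not from the patch):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HTestConst;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.regionserver.RegionAsTable;
    import org.apache.hadoop.hbase.util.Bytes;

    private void createStoreFile(HRegion region, byte[] family) throws IOException {
      Table loader = new RegionAsTable(region);
      // Was: HBaseTestCase.addContent(loader, Bytes.toString(family));
      HTestConst.addContent(loader, Bytes.toString(family));
      region.flush(true);
    }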
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/GenericTestUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/GenericTestUtils.java
deleted file mode 100644
index 2fc5dd7..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/GenericTestUtils.java
+++ /dev/null
@@ -1,321 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.File;
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.lang.management.ThreadInfo;
-import java.lang.management.ThreadMXBean;
-import java.lang.reflect.InvocationTargetException;
-import java.util.Arrays;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.regex.Pattern;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-import org.slf4j.Logger;
-
-import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-import org.apache.hbase.thirdparty.com.google.common.base.Supplier;
-import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-
-/**
- * Provides some very generic helpers which might be used across the tests
- */
-public abstract class GenericTestUtils {
-
- private static final AtomicInteger sequence = new AtomicInteger();
-
- /**
- * Extracts the name of the method where the invocation has happened
- * @return String name of the invoking method
- */
- public static String getMethodName() {
- return Thread.currentThread().getStackTrace()[2].getMethodName();
- }
-
- /**
- * Generates a process-wide unique sequence number.
- * @return a unique sequence number
- */
- public static int uniqueSequenceId() {
- return sequence.incrementAndGet();
- }
-
- /**
- * Assert that a given file exists.
- */
- public static void assertExists(File f) {
- Assert.assertTrue("File " + f + " should exist", f.exists());
- }
-
- /**
- * List all of the files in 'dir' that match the regex 'pattern'.
- * Then check that this list is identical to 'expectedMatches'.
- * @throws IOException if the dir is inaccessible
- */
- public static void assertGlobEquals(File dir, String pattern,
- String ... expectedMatches) throws IOException {
-
- Set<String> found = Sets.newTreeSet();
- for (File f : FileUtil.listFiles(dir)) {
- if (f.getName().matches(pattern)) {
- found.add(f.getName());
- }
- }
- Set<String> expectedSet = Sets.newTreeSet(
- Arrays.asList(expectedMatches));
- Assert.assertEquals("Bad files matching " + pattern + " in " + dir,
- Joiner.on(",").join(expectedSet),
- Joiner.on(",").join(found));
- }
-
- public static void waitFor(Supplier<Boolean> check,
- int checkEveryMillis, int waitForMillis)
- throws TimeoutException, InterruptedException
- {
- long st = Time.now();
- do {
- boolean result = check.get();
- if (result) {
- return;
- }
-
- Thread.sleep(checkEveryMillis);
- } while (Time.now() - st < waitForMillis);
-
- throw new TimeoutException("Timed out waiting for condition. " +
- "Thread diagnostics:\n" +
- TimedOutTestsListener.buildThreadDiagnosticString());
- }
-
- /**
- * Mockito answer helper that triggers one latch as soon as the
- * method is called, then waits on another before continuing.
- */
- public static class DelayAnswer implements Answer<Object> {
- private final Logger LOG;
-
- private final CountDownLatch fireLatch = new CountDownLatch(1);
- private final CountDownLatch waitLatch = new CountDownLatch(1);
- private final CountDownLatch resultLatch = new CountDownLatch(1);
-
- private final AtomicInteger fireCounter = new AtomicInteger(0);
- private final AtomicInteger resultCounter = new AtomicInteger(0);
-
- // Result fields set after proceed() is called.
- private volatile Throwable thrown;
- private volatile Object returnValue;
-
- public DelayAnswer(Logger log) {
- this.LOG = log;
- }
-
- /**
- * Wait until the method is called.
- */
- public void waitForCall() throws InterruptedException {
- fireLatch.await();
- }
-
- /**
- * Tell the method to proceed.
- * This should only be called after waitForCall()
- */
- public void proceed() {
- waitLatch.countDown();
- }
-
- @Override
- public Object answer(InvocationOnMock invocation) throws Throwable {
- LOG.info("DelayAnswer firing fireLatch");
- fireCounter.getAndIncrement();
- fireLatch.countDown();
- try {
- LOG.info("DelayAnswer waiting on waitLatch");
- waitLatch.await();
- LOG.info("DelayAnswer delay complete");
- } catch (InterruptedException ie) {
- throw new IOException("Interrupted waiting on latch", ie);
- }
- return passThrough(invocation);
- }
-
- protected Object passThrough(InvocationOnMock invocation) throws Throwable {
- try {
- Object ret = invocation.callRealMethod();
- returnValue = ret;
- return ret;
- } catch (Throwable t) {
- thrown = t;
- throw t;
- } finally {
- resultCounter.incrementAndGet();
- resultLatch.countDown();
- }
- }
-
- /**
- * After calling proceed(), this will wait until the call has
- * completed and a result has been returned to the caller.
- */
- public void waitForResult() throws InterruptedException {
- resultLatch.await();
- }
-
- /**
- * After the call has gone through, return any exception that
- * was thrown, or null if no exception was thrown.
- */
- public Throwable getThrown() {
- return thrown;
- }
-
- /**
- * After the call has gone through, return the call's return value,
- * or null in case it was void or an exception was thrown.
- */
- public Object getReturnValue() {
- return returnValue;
- }
-
- public int getFireCount() {
- return fireCounter.get();
- }
-
- public int getResultCount() {
- return resultCounter.get();
- }
- }
-
- /**
- * An Answer implementation that simply forwards all calls through
- * to a delegate.
- *
- * This is useful as the default Answer for a mock object, to create
- * something like a spy on an RPC proxy. For example:
- * <code>
- * NamenodeProtocol origNNProxy = secondary.getNameNode();
- * NamenodeProtocol spyNNProxy = Mockito.mock(NamenodeProtocol.class,
- * new DelegateAnswer(origNNProxy));
- * doThrow(...).when(spyNNProxy).getBlockLocations(...);
- * ...
- * </code>
- */
- public static class DelegateAnswer implements Answer<Object> {
- private final Object delegate;
- private final Logger log;
-
- public DelegateAnswer(Object delegate) {
- this(null, delegate);
- }
-
- public DelegateAnswer(Logger log, Object delegate) {
- this.log = log;
- this.delegate = delegate;
- }
-
- @Override
- public Object answer(InvocationOnMock invocation) throws Throwable {
- try {
- if (log != null) {
- log.info("Call to " + invocation + " on " + delegate,
- new Exception("TRACE"));
- }
- return invocation.getMethod().invoke(
- delegate, invocation.getArguments());
- } catch (InvocationTargetException ite) {
- throw ite.getCause();
- }
- }
- }
-
- /**
- * An Answer implementation which sleeps for a random number of milliseconds
- * between 0 and a configurable value before delegating to the real
- * implementation of the method. This can be useful for drawing out race
- * conditions.
- */
- public static class SleepAnswer implements Answer<Object> {
- private final int maxSleepTime;
- private static Random r = new Random();
-
- public SleepAnswer(int maxSleepTime) {
- this.maxSleepTime = maxSleepTime;
- }
-
- @Override
- public Object answer(InvocationOnMock invocation) throws Throwable {
- boolean interrupted = false;
- try {
- Thread.sleep(r.nextInt(maxSleepTime));
- } catch (InterruptedException ie) {
- interrupted = true;
- }
- try {
- return invocation.callRealMethod();
- } finally {
- if (interrupted) {
- Thread.currentThread().interrupt();
- }
- }
- }
- }
-
- public static void assertMatches(String output, String pattern) {
- Assert.assertTrue("Expected output to match /" + pattern + "/" +
- " but got:\n" + output,
- Pattern.compile(pattern).matcher(output).find());
- }
-
- public static void assertValueNear(long expected, long actual, long allowedError) {
- assertValueWithinRange(expected - allowedError, expected + allowedError, actual);
- }
-
- public static void assertValueWithinRange(long expectedMin, long expectedMax,
- long actual) {
- Assert.assertTrue("Expected " + actual + " to be in range (" + expectedMin + ","
- + expectedMax + ")", expectedMin <= actual && actual <= expectedMax);
- }
-
- /**
- * Assert that there are no threads running whose name matches the
- * given regular expression.
- * @param regex the regex to match against
- */
- public static void assertNoThreadsMatching(String regex) {
- Pattern pattern = Pattern.compile(regex);
- ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
-
- ThreadInfo[] infos = threadBean.getThreadInfo(threadBean.getAllThreadIds(), 20);
- for (ThreadInfo info : infos) {
- if (info == null) continue;
- if (pattern.matcher(info.getThreadName()).matches()) {
- Assert.fail("Leaked thread: " + info + "\n" +
- Joiner.on("\n").join(info.getStackTrace()));
- }
- }
- }
-}
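GenericTestUtils was a stale copy of the Hadoop test helper of the same name and had no remaining callers in hbase-server. For the record, its DelayAnswer parked a stubbed call on a latch so a test could make assertions mid-call. A usage sketch against the removed class, with a hypothetical MyService and its doWork() method:

    // LOG is an slf4j Logger; MyService is a placeholder, not from the patch.
    MyService spied = Mockito.spy(new MyService());
    GenericTestUtils.DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
    Mockito.doAnswer(delayer).when(spied).doWork();

    Thread caller = new Thread(() -> spied.doWork()); // parks inside answer()
    caller.start();
    delayer.waitForCall();   // returns once doWork() has been entered
    // ... assert on intermediate state while the call is held ...
    delayer.proceed();       // count down the wait latch
    delayer.waitForResult(); // block until the real doWork() has finished
    Assert.assertNull(delayer.getThrown());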
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
deleted file mode 100644
index 6c31eef..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
+++ /dev/null
@@ -1,459 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-import java.nio.charset.StandardCharsets;
-import java.util.NavigableMap;
-import junit.framework.AssertionFailedError;
-import junit.framework.TestCase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.log.HBaseMarkers;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.RegionAsTable;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.CommonFSUtils;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Abstract HBase test class. Initializes a few things that can come in handy
- * like an HBaseConfiguration and filesystem.
- * @deprecated since 2.0.0 and will be removed in 3.0.0. Write junit4 unit tests using
- * {@link HBaseTestingUtility}.
- * @see HBaseTestingUtility
- * @see <a href="https://issues.apache.org/jira/browse/HBASE-11912">HBASE-11912</a>
- */
-@Deprecated
-public abstract class HBaseTestCase extends TestCase {
- private static final Logger LOG = LoggerFactory.getLogger(HBaseTestCase.class);
-
- protected final static byte [] fam1 = Bytes.toBytes("colfamily11");
- protected final static byte [] fam2 = Bytes.toBytes("colfamily21");
- protected final static byte [] fam3 = Bytes.toBytes("colfamily31");
-
- protected static final byte [][] COLUMNS = {fam1, fam2, fam3};
-
- private boolean localfs = false;
- protected static Path testDir = null;
- protected FileSystem fs = null;
- protected HRegion meta = null;
- protected static final char FIRST_CHAR = 'a';
- protected static final char LAST_CHAR = 'z';
- protected static final String PUNCTUATION = "~`@#$%^&*()-_+=:;',.<>/?[]{}|";
- protected static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
- protected String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
- protected static final int MAXVERSIONS = 3;
-
- protected final HBaseTestingUtility testUtil = new HBaseTestingUtility();
-
- public volatile Configuration conf = testUtil.getConfiguration();
- public final FSTableDescriptors fsTableDescriptors;
- {
- try {
- fsTableDescriptors = new FSTableDescriptors(conf);
- } catch (IOException e) {
- throw new RuntimeException("Failed to init descriptors", e);
- }
- }
-
- /** constructor */
- public HBaseTestCase() {
- super();
- }
-
- /**
- * @param name
- */
- public HBaseTestCase(String name) {
- super(name);
- }
-
- /**
- * Note that this method must be called after the mini hdfs cluster has
- * started or we end up with a local file system.
- */
- @Override
- protected void setUp() throws Exception {
- super.setUp();
- localfs =
- (conf.get("fs.defaultFS", "file:///").compareTo("file:///") == 0);
-
- if (fs == null) {
- this.fs = FileSystem.get(conf);
- }
- try {
- if (localfs) {
- testDir = getUnitTestdir(getName());
- if (fs.exists(testDir)) {
- fs.delete(testDir, true);
- }
- } else {
- testDir = CommonFSUtils.getRootDir(conf);
- }
- } catch (Exception e) {
- LOG.error(HBaseMarkers.FATAL, "error during setup", e);
- throw e;
- }
- }
-
- @Override
- protected void tearDown() throws Exception {
- try {
- if (localfs) {
- if (this.fs.exists(testDir)) {
- this.fs.delete(testDir, true);
- }
- }
- } catch (Exception e) {
- LOG.error(HBaseMarkers.FATAL, "error during tear down", e);
- }
- super.tearDown();
- }
-
- /**
- * @see HBaseTestingUtility#getBaseTestDir
- * @param testName
- * @return directory to use for this test
- */
- protected Path getUnitTestdir(String testName) {
- return testUtil.getDataTestDir(testName);
- }
-
- /**
- * You must call close on the returned region and then close on the log file it created. Do
- * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to close both the region and the WAL.
- * @param tableDescriptor TableDescriptor
- * @param startKey Start Key
- * @param endKey End Key
- * @return An {@link HRegion}
- * @throws IOException If thrown by
- * {@link #createNewHRegion(TableDescriptor, byte[], byte[], Configuration)}
- */
- public HRegion createNewHRegion(TableDescriptor tableDescriptor, byte [] startKey,
- byte [] endKey)
- throws IOException {
- return createNewHRegion(tableDescriptor, startKey, endKey, this.conf);
- }
-
- public HRegion createNewHRegion(TableDescriptor tableDescriptor, byte[] startKey, byte[] endKey,
- Configuration conf) throws IOException {
- RegionInfo hri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName())
- .setStartKey(startKey).setEndKey(endKey).build();
- return HBaseTestingUtility.createRegionAndWAL(hri, testDir, conf, tableDescriptor);
- }
-
- protected HRegion openClosedRegion(final HRegion closedRegion)
- throws IOException {
- return HRegion.openHRegion(closedRegion, null);
- }
-
- /**
- * Create a table of name {@code name} with {@link #COLUMNS} for
- * families.
- * @param name Name to give table.
- * @return Table descriptor.
- */
- protected TableDescriptor createTableDescriptor(final String name) {
- return createTableDescriptor(name, MAXVERSIONS);
- }
-
- /**
- * Create a table of name {@code name} with {@link #COLUMNS} for
- * families.
- * @param name Name to give table.
- * @param versions How many versions to allow per column.
- * @return Table descriptor.
- */
- protected TableDescriptor createTableDescriptor(final String name,
- final int versions) {
- return createTableDescriptor(name, HColumnDescriptor.DEFAULT_MIN_VERSIONS,
- versions, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
- }
-
- /**
- * Create a table of name {@code name} with {@link #COLUMNS} for
- * families.
- * @param name Name to give table.
- * @param minVersions Minimum number of versions to keep per column.
- * @param versions How many versions to allow per column.
- * @param ttl Time to live, in seconds.
- * @param keepDeleted Whether to keep deleted cells.
- * @return Table descriptor.
- */
- protected TableDescriptor createTableDescriptor(final String name,
- final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
- TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
- new TableDescriptorBuilder.ModifyableTableDescriptor(TableName.valueOf(name));
- for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
- tableDescriptor.setColumnFamily(
- new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(cfName)
- .setMinVersions(minVersions)
- .setMaxVersions(versions)
- .setKeepDeletedCells(keepDeleted)
- .setBlockCacheEnabled(false)
- .setTimeToLive(ttl)
- );
- }
- return tableDescriptor;
- }
-
- /**
- * Add content to region <code>r</code> on the passed column
- * <code>column</code>.
- * Adds data of the form 'aaa', 'aab', etc., where key and value are the same.
- * @param r
- * @param columnFamily
- * @param column
- * @throws IOException
- * @return count of what we added.
- */
- public static long addContent(final Region r, final byte [] columnFamily, final byte[] column)
- throws IOException {
- byte [] startKey = r.getRegionInfo().getStartKey();
- byte [] endKey = r.getRegionInfo().getEndKey();
- byte [] startKeyBytes = startKey;
- if (startKeyBytes == null || startKeyBytes.length == 0) {
- startKeyBytes = START_KEY_BYTES;
- }
- return addContent(new RegionAsTable(r), Bytes.toString(columnFamily), Bytes.toString(column),
- startKeyBytes, endKey, -1);
- }
-
- public static long addContent(final Region r, final byte [] columnFamily) throws IOException {
- return addContent(r, columnFamily, null);
- }
-
- /**
- * Add content to region <code>r</code> on the passed column
- * <code>column</code>.
- * Adds data of the form 'aaa', 'aab', etc., where key and value are the same.
- * @throws IOException
- * @return count of what we added.
- */
- public static long addContent(final Table updater,
- final String columnFamily) throws IOException {
- return addContent(updater, columnFamily, START_KEY_BYTES, null);
- }
-
- public static long addContent(final Table updater, final String family,
- final String column) throws IOException {
- return addContent(updater, family, column, START_KEY_BYTES, null);
- }
-
- /**
- * Add content to region <code>r</code> on the passed column
- * <code>column</code>.
- * Adds data of the form 'aaa', 'aab', etc., where key and value are the same.
- * @return count of what we added.
- * @throws IOException
- */
- public static long addContent(final Table updater, final String columnFamily,
- final byte [] startKeyBytes, final byte [] endKey)
- throws IOException {
- return addContent(updater, columnFamily, null, startKeyBytes, endKey, -1);
- }
-
- public static long addContent(final Table updater, final String family, String column,
- final byte [] startKeyBytes, final byte [] endKey) throws IOException {
- return addContent(updater, family, column, startKeyBytes, endKey, -1);
- }
-
- /**
- * Add content to region <code>r</code> on the passed column
- * <code>column</code>.
- * Adds data of the form 'aaa', 'aab', etc., where key and value are the same.
- * @return count of what we added.
- * @throws IOException
- */
- public static long addContent(final Table updater,
- final String columnFamily,
- final String column,
- final byte [] startKeyBytes, final byte [] endKey, final long ts)
- throws IOException {
- long count = 0;
- // Add rows of three characters. The first character starts with the
- // 'a' character and runs up to 'z'. Per first character, we run the
- // second character over same range. And same for the third so rows
- // (and values) look like this: 'aaa', 'aab', 'aac', etc.
- char secondCharStart = (char)startKeyBytes[1];
- char thirdCharStart = (char)startKeyBytes[2];
- EXIT: for (char c = (char)startKeyBytes[0]; c <= LAST_CHAR; c++) {
- for (char d = secondCharStart; d <= LAST_CHAR; d++) {
- for (char e = thirdCharStart; e <= LAST_CHAR; e++) {
- byte [] t = new byte [] {(byte)c, (byte)d, (byte)e};
- if (endKey != null && endKey.length > 0
- && Bytes.compareTo(endKey, t) <= 0) {
- break EXIT;
- }
- try {
- Put put;
- if(ts != -1) {
- put = new Put(t, ts);
- } else {
- put = new Put(t);
- }
- try {
- StringBuilder sb = new StringBuilder();
- if (column != null && column.contains(":")) {
- sb.append(column);
- } else {
- if (columnFamily != null) {
- sb.append(columnFamily);
- if (!columnFamily.endsWith(":")) {
- sb.append(":");
- }
- if (column != null) {
- sb.append(column);
- }
- }
- }
- byte[][] split =
- CellUtil.parseColumn(Bytes.toBytes(sb.toString()));
- if(split.length == 1) {
- byte[] qualifier = new byte[0];
- put.addColumn(split[0], qualifier, t);
- } else {
- put.addColumn(split[0], split[1], t);
- }
- put.setDurability(Durability.SKIP_WAL);
- updater.put(put);
- count++;
- } catch (RuntimeException ex) {
- ex.printStackTrace();
- throw ex;
- } catch (IOException ex) {
- ex.printStackTrace();
- throw ex;
- }
- } catch (RuntimeException ex) {
- ex.printStackTrace();
- throw ex;
- } catch (IOException ex) {
- ex.printStackTrace();
- throw ex;
- }
- }
- // Set start character back to FIRST_CHAR after we've done first loop.
- thirdCharStart = FIRST_CHAR;
- }
- secondCharStart = FIRST_CHAR;
- }
- return count;
- }
-
- protected void assertResultEquals(final HRegion region, final byte [] row,
- final byte [] family, final byte [] qualifier, final long timestamp,
- final byte [] value) throws IOException {
- Get get = new Get(row);
- get.setTimestamp(timestamp);
- Result res = region.get(get);
- NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map =
- res.getMap();
- byte [] res_value = map.get(family).get(qualifier).get(timestamp);
-
- if (value == null) {
- assertEquals(Bytes.toString(family) + " " + Bytes.toString(qualifier) +
- " at timestamp " + timestamp, null, res_value);
- } else {
- if (res_value == null) {
- fail(Bytes.toString(family) + " " + Bytes.toString(qualifier) +
- " at timestamp " + timestamp + "\" was expected to be \"" +
- Bytes.toStringBinary(value) + " but was null");
- }
- if (res_value != null) {
- assertEquals(Bytes.toString(family) + " " + Bytes.toString(qualifier) +
- " at timestamp " +
- timestamp, value, new String(res_value, StandardCharsets.UTF_8));
- }
- }
- }
-
- /**
- * Common method to close down a MiniDFSCluster and the associated file system
- *
- * @param cluster
- */
- public static void shutdownDfs(MiniDFSCluster cluster) {
- if (cluster != null) {
- LOG.info("Shutting down Mini DFS ");
- try {
- cluster.shutdown();
- } catch (Exception e) {
- // Can get a java.lang.reflect.UndeclaredThrowableException thrown
- // here because of an InterruptedException. Don't let exceptions in
- // here be the cause of test failure.
- }
- try {
- FileSystem fs = cluster.getFileSystem();
- if (fs != null) {
- LOG.info("Shutting down FileSystem");
- fs.close();
- }
- FileSystem.closeAll();
- } catch (IOException e) {
- LOG.error("error closing file system", e);
- }
- }
- }
-
- /**
- * You must call {@link #closeRootAndMeta()} when done after calling this method. It does cleanup.
- * @throws IOException
- */
- protected void createMetaRegion() throws IOException {
- FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(conf);
- meta = HBaseTestingUtility.createRegionAndWAL(RegionInfoBuilder.FIRST_META_REGIONINFO, testDir,
- conf, fsTableDescriptors.get(TableName.META_TABLE_NAME));
- }
-
- protected void closeRootAndMeta() throws IOException {
- HBaseTestingUtility.closeRegionAndWAL(meta);
- }
-
- public static void assertByteEquals(byte[] expected,
- byte[] actual) {
- if (Bytes.compareTo(expected, actual) != 0) {
- throw new AssertionFailedError("expected:<" +
- Bytes.toString(expected) + "> but was:<" +
- Bytes.toString(actual) + ">");
- }
- }
-
- public static void assertEquals(byte[] expected,
- byte[] actual) {
- if (Bytes.compareTo(expected, actual) != 0) {
- throw new AssertionFailedError("expected:<" +
- Bytes.toStringBinary(expected) + "> but was:<" +
- Bytes.toStringBinary(actual) + ">");
- }
- }
-}
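With HBaseTestCase gone, its former subclasses own their lifecycle directly. The shape they converge on is the standard JUnit4 pattern visible in the TestHStoreFile hunk further down: a shared HBaseTestingUtility, a TestName rule for per-method test directories, and class-level cleanup. A skeleton of that pattern (the class name is a placeholder):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.junit.AfterClass;
    import org.junit.Before;
    import org.junit.Rule;
    import org.junit.rules.TestName;

    public class TestSomething {
      private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

      @Rule
      public TestName name = new TestName();

      private Configuration conf;
      private Path testDir;
      private FileSystem fs;

      @Before
      public void setUp() throws IOException {
        conf = TEST_UTIL.getConfiguration();
        testDir = TEST_UTIL.getDataTestDir(name.getMethodName());
        fs = testDir.getFileSystem(conf);
      }

      @AfterClass
      public static void tearDownAfterClass() {
        TEST_UTIL.cleanupTestDir();
      }
    }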
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java
index 268f79c..c490c83 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java
@@ -16,16 +16,21 @@
*/
package org.apache.hadoop.hbase;
+import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.Collections;
-
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionAsTable;
import org.apache.hadoop.hbase.util.Bytes;
/**
- * Similar to {@link HConstants} but for tests. Also provides some simple
- * static utility functions to generate test data.
+ * Similar to {@link HConstants} but for tests. Also provides some simple static utility functions
+ * to generate test data.
*/
public class HTestConst {
@@ -34,15 +39,13 @@ public class HTestConst {
public static final String DEFAULT_TABLE_STR = "MyTestTable";
public static final byte[] DEFAULT_TABLE_BYTES = Bytes.toBytes(DEFAULT_TABLE_STR);
- public static final TableName DEFAULT_TABLE =
- TableName.valueOf(DEFAULT_TABLE_BYTES);
+ public static final TableName DEFAULT_TABLE = TableName.valueOf(DEFAULT_TABLE_BYTES);
public static final String DEFAULT_CF_STR = "MyDefaultCF";
public static final byte[] DEFAULT_CF_BYTES = Bytes.toBytes(DEFAULT_CF_STR);
public static final Set<String> DEFAULT_CF_STR_SET =
- Collections.unmodifiableSet(new HashSet<>(
- Arrays.asList(new String[] { DEFAULT_CF_STR })));
+ Collections.unmodifiableSet(new HashSet<>(Arrays.asList(new String[] { DEFAULT_CF_STR })));
public static final String DEFAULT_ROW_STR = "MyTestRow";
public static final byte[] DEFAULT_ROW_BYTES = Bytes.toBytes(DEFAULT_ROW_STR);
@@ -53,12 +56,16 @@ public class HTestConst {
public static String DEFAULT_VALUE_STR = "MyTestValue";
public static byte[] DEFAULT_VALUE_BYTES = Bytes.toBytes(DEFAULT_VALUE_STR);
+ private static final char FIRST_CHAR = 'a';
+ private static final char LAST_CHAR = 'z';
+ private static final byte[] START_KEY_BYTES = { FIRST_CHAR, FIRST_CHAR, FIRST_CHAR };
+
/**
- * Generate the given number of unique byte sequences by appending numeric
- * suffixes (ASCII representations of decimal numbers).
+ * Generate the given number of unique byte sequences by appending numeric suffixes (ASCII
+ * representations of decimal numbers).
*/
public static byte[][] makeNAscii(byte[] base, int n) {
- byte [][] ret = new byte[n][];
+ byte[][] ret = new byte[n][];
for (int i = 0; i < n; i++) {
byte[] tail = Bytes.toBytes(Integer.toString(i));
ret[i] = Bytes.add(base, tail);
@@ -66,4 +73,112 @@ public class HTestConst {
return ret;
}
+ /**
+ * Add content to region <code>r</code> on the passed column <code>column</code>. Adds data of the
+ * form 'aaa', 'aab', etc., where key and value are the same.
+ * @return count of what we added.
+ */
+ public static long addContent(final Region r, final byte[] columnFamily, final byte[] column)
+ throws IOException {
+ byte[] startKey = r.getRegionInfo().getStartKey();
+ byte[] endKey = r.getRegionInfo().getEndKey();
+ byte[] startKeyBytes = startKey;
+ if (startKeyBytes == null || startKeyBytes.length == 0) {
+ startKeyBytes = START_KEY_BYTES;
+ }
+ return addContent(new RegionAsTable(r), Bytes.toString(columnFamily), Bytes.toString(column),
+ startKeyBytes, endKey, -1);
+ }
+
+ public static long addContent(final Region r, final byte[] columnFamily) throws IOException {
+ return addContent(r, columnFamily, null);
+ }
+
+ /**
+ * Add content to region <code>r</code> on the passed column <code>column</code>. Adds data of the
+ * form 'aaa', 'aab', etc., where key and value are the same.
+ * @return count of what we added.
+ */
+ public static long addContent(Table updater, String columnFamily) throws IOException {
+ return addContent(updater, columnFamily, START_KEY_BYTES, null);
+ }
+
+ public static long addContent(Table updater, String family, String column) throws IOException {
+ return addContent(updater, family, column, START_KEY_BYTES, null);
+ }
+
+ /**
+ * Add content to region <code>r</code> on the passed column <code>column</code>. Adds data of the
+ * form 'aaa', 'aab', etc., where key and value are the same.
+ * @return count of what we added.
+ */
+ public static long addContent(Table updater, String columnFamily, byte[] startKeyBytes,
+ byte[] endKey) throws IOException {
+ return addContent(updater, columnFamily, null, startKeyBytes, endKey, -1);
+ }
+
+ public static long addContent(Table updater, String family, String column, byte[] startKeyBytes,
+ byte[] endKey) throws IOException {
+ return addContent(updater, family, column, startKeyBytes, endKey, -1);
+ }
+
+ /**
+ * Add content to region <code>r</code> on the passed column <code>column</code>. Adds data of the
+ * form 'aaa', 'aab', etc., where key and value are the same.
+ * @return count of what we added.
+ */
+ public static long addContent(Table updater, String columnFamily, String column,
+ byte[] startKeyBytes, byte[] endKey, long ts) throws IOException {
+ long count = 0;
+ // Add rows of three characters. The first character starts with the
+ // 'a' character and runs up to 'z'. Per first character, we run the
+ // second character over same range. And same for the third so rows
+ // (and values) look like this: 'aaa', 'aab', 'aac', etc.
+ char secondCharStart = (char) startKeyBytes[1];
+ char thirdCharStart = (char) startKeyBytes[2];
+ EXIT: for (char c = (char) startKeyBytes[0]; c <= LAST_CHAR; c++) {
+ for (char d = secondCharStart; d <= LAST_CHAR; d++) {
+ for (char e = thirdCharStart; e <= LAST_CHAR; e++) {
+ byte[] t = new byte[] { (byte) c, (byte) d, (byte) e };
+ if (endKey != null && endKey.length > 0 && Bytes.compareTo(endKey, t) <= 0) {
+ break EXIT;
+ }
+ Put put;
+ if (ts != -1) {
+ put = new Put(t, ts);
+ } else {
+ put = new Put(t);
+ }
+ StringBuilder sb = new StringBuilder();
+ if (column != null && column.contains(":")) {
+ sb.append(column);
+ } else {
+ if (columnFamily != null) {
+ sb.append(columnFamily);
+ if (!columnFamily.endsWith(":")) {
+ sb.append(":");
+ }
+ if (column != null) {
+ sb.append(column);
+ }
+ }
+ }
+ byte[][] split = CellUtil.parseColumn(Bytes.toBytes(sb.toString()));
+ if (split.length == 1) {
+ byte[] qualifier = new byte[0];
+ put.addColumn(split[0], qualifier, t);
+ } else {
+ put.addColumn(split[0], split[1], t);
+ }
+ put.setDurability(Durability.SKIP_WAL);
+ updater.put(put);
+ count++;
+ }
+ // Set start character back to FIRST_CHAR after we've done first loop.
+ thirdCharStart = FIRST_CHAR;
+ }
+ secondCharStart = FIRST_CHAR;
+ }
+ return count;
+ }
}
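The relocated addContent enumerates three-character row keys from the region's start key up to its end key ('aaa', 'aab', ..., 'zzz'), writes each key as its own value with SKIP_WAL durability, and returns the number of rows written. A usage sketch, assuming a region created elsewhere:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HTestConst;
    import org.apache.hadoop.hbase.regionserver.Region;

    static long fillRegion(Region region, byte[] family) throws IOException {
      // Bounded by the region's key range; an unbounded region gets
      // 26^3 = 17,576 puts.
      return HTestConst.addContent(region, family);
    }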
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java
index b38fb6a..e2a2333 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hbase.client;
-import static org.apache.hadoop.hbase.HBaseTestCase.assertByteEquals;
+import static org.junit.Assert.assertArrayEquals;
import java.io.IOException;
import java.nio.ByteBuffer;
@@ -158,7 +158,7 @@ public class TestResult extends TestCase {
for (int i = 0; i < 100; ++i) {
final byte[] qf = Bytes.toBytes(i);
- assertByteEquals(Bytes.add(value, Bytes.toBytes(i)), r.getValue(family, qf));
+ assertArrayEquals(Bytes.add(value, Bytes.toBytes(i)), r.getValue(family, qf));
assertTrue(r.containsColumn(family, qf));
}
}
@@ -177,7 +177,7 @@ public class TestResult extends TestCase {
for (int i = 0; i < 100; ++i) {
final byte[] qf = Bytes.toBytes(i);
- assertByteEquals(Bytes.add(value, Bytes.toBytes(i)), r.getValue(family, qf));
+ assertArrayEquals(Bytes.add(value, Bytes.toBytes(i)), r.getValue(family, qf));
assertTrue(r.containsColumn(family, qf));
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
index 59c3157..03e031a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
@@ -38,9 +38,9 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTestConst;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Get;
@@ -295,7 +295,7 @@ public class TestCoprocessorInterface {
HRegion region = initHRegion(tableName, name.getMethodName(), hc, new Class<?>[]{}, families);
for (int i = 0; i < 3; i++) {
- HBaseTestCase.addContent(region, fam3);
+ HTestConst.addContent(region, fam3);
region.flush(true);
}
@@ -357,7 +357,7 @@ public class TestCoprocessorInterface {
HRegion region = initHRegion(tableName, name.getMethodName(), hc,
new Class<?>[]{CoprocessorImpl.class}, families);
for (int i = 0; i < 3; i++) {
- HBaseTestCase.addContent(region, fam3);
+ HTestConst.addContent(region, fam3);
region.flush(true);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java
index 51fba2d..c3af661 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java
@@ -17,12 +17,11 @@
*/
package org.apache.hadoop.hbase.regionserver;
-import static org.apache.hadoop.hbase.HBaseTestCase.addContent;
+import static org.apache.hadoop.hbase.HTestConst.addContent;
import static org.junit.Assert.assertEquals;
import java.util.ArrayList;
import java.util.List;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
index 1a97067..7c8494a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
@@ -27,7 +27,7 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
-import static org.mockito.Matchers.any;
+import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
@@ -48,9 +48,9 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTestConst;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Delete;
@@ -173,7 +173,7 @@ public class TestCompaction {
for (int j = 0; j < jmax; j++) {
p.addColumn(COLUMN_FAMILY, Bytes.toBytes(j), pad);
}
- HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY));
+ HTestConst.addContent(loader, Bytes.toString(COLUMN_FAMILY));
loader.put(p);
r.flush(true);
}
@@ -249,7 +249,7 @@ public class TestCompaction {
for (int j = 0; j < jmax; j++) {
p.addColumn(COLUMN_FAMILY, Bytes.toBytes(j), pad);
}
- HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY));
+ HTestConst.addContent(loader, Bytes.toString(COLUMN_FAMILY));
loader.put(p);
r.flush(true);
}
@@ -329,7 +329,7 @@ public class TestCompaction {
private void createStoreFile(final HRegion region, String family) throws IOException {
Table loader = new RegionAsTable(region);
- HBaseTestCase.addContent(loader, family);
+ HTestConst.addContent(loader, family);
region.flush(true);
}
@@ -503,7 +503,7 @@ public class TestCompaction {
for (int j = 0; j < jmax; j++) {
p.addColumn(COLUMN_FAMILY, Bytes.toBytes(j), pad);
}
- HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY));
+ HTestConst.addContent(loader, Bytes.toString(COLUMN_FAMILY));
loader.put(p);
r.flush(true);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
index 4d6b422..f55372f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
@@ -17,6 +17,13 @@
*/
package org.apache.hadoop.hbase.regionserver;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -37,7 +44,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
@@ -59,7 +65,6 @@ import org.apache.hadoop.hbase.io.hfile.CacheStats;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
-import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
import org.apache.hadoop.hbase.io.hfile.HFileInfo;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.ReaderContext;
@@ -70,11 +75,13 @@ import org.apache.hadoop.hbase.util.BloomFilterFactory;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.CommonFSUtils;
-import org.junit.After;
+import org.junit.AfterClass;
import org.junit.Before;
import org.junit.ClassRule;
+import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -86,12 +93,12 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
/**
* Test HStoreFile
*/
-@Category({RegionServerTests.class, MediumTests.class})
-public class TestHStoreFile extends HBaseTestCase {
+@Category({ RegionServerTests.class, MediumTests.class })
+public class TestHStoreFile {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestHStoreFile.class);
+ HBaseClassTestRule.forClass(TestHStoreFile.class);
private static final Logger LOG = LoggerFactory.getLogger(TestHStoreFile.class);
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
@@ -100,22 +107,31 @@ public class TestHStoreFile extends HBaseTestCase {
private static final ChecksumType CKTYPE = ChecksumType.CRC32C;
private static final int CKBYTES = 512;
private static String TEST_FAMILY = "cf";
+ private static final char FIRST_CHAR = 'a';
+ private static final char LAST_CHAR = 'z';
+
+ @Rule
+ public TestName name = new TestName();
+
+ private Configuration conf;
+ private Path testDir;
+ private FileSystem fs;
- @Override
@Before
- public void setUp() throws Exception {
- super.setUp();
+ public void setUp() throws IOException {
+ conf = TEST_UTIL.getConfiguration();
+ testDir = TEST_UTIL.getDataTestDir(name.getMethodName());
+ fs = testDir.getFileSystem(conf);
}
- @Override
- @After
- public void tearDown() throws Exception {
- super.tearDown();
+ @AfterClass
+ public static void tearDownAfterClass() {
+ TEST_UTIL.cleanupTestDir();
}
/**
- * Write a file and then assert that we can read from top and bottom halves
- * using two HalfMapFiles.
+ * Write a file and then assert that we can read from top and bottom halves using two
+ * HalfMapFiles.
*/
@Test
public void testBasicHalfMapFile() throws Exception {
@@ -124,11 +140,9 @@ public class TestHStoreFile extends HBaseTestCase {
HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
new Path(testDir, hri.getTable().getNameAsString()), hri);
- HFileContext meta = new HFileContextBuilder().withBlockSize(2*1024).build();
+ HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build();
StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
- .withFilePath(regionFs.createTempName())
- .withFileContext(meta)
- .build();
+ .withFilePath(regionFs.createTempName()).withFileContext(meta).build();
writeStoreFile(writer);
Path sfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
@@ -137,20 +151,20 @@ public class TestHStoreFile extends HBaseTestCase {
}
private void writeStoreFile(final StoreFileWriter writer) throws IOException {
- writeStoreFile(writer, Bytes.toBytes(getName()), Bytes.toBytes(getName()));
+ writeStoreFile(writer, Bytes.toBytes(name.getMethodName()),
+ Bytes.toBytes(name.getMethodName()));
}
// pick a split point (roughly halfway)
- byte[] SPLITKEY = new byte[] { (LAST_CHAR + FIRST_CHAR)/2, FIRST_CHAR};
+ byte[] SPLITKEY = new byte[] { (LAST_CHAR + FIRST_CHAR) / 2, FIRST_CHAR };
/*
- * Writes HStoreKey and ImmutableBytes data to passed writer and
- * then closes it.
+ * Writes HStoreKey and ImmutableBytes data to passed writer and then closes it.
* @param writer
* @throws IOException
*/
public static void writeStoreFile(final StoreFileWriter writer, byte[] fam, byte[] qualifier)
- throws IOException {
+ throws IOException {
long now = System.currentTimeMillis();
try {
for (char d = FIRST_CHAR; d <= LAST_CHAR; d++) {
@@ -165,8 +179,8 @@ public class TestHStoreFile extends HBaseTestCase {
}
/**
- * Test that our mechanism of writing store files in one region to reference
- * store files in other regions works.
+ * Test that our mechanism of writing store files in one region to reference store files in other
+ * regions works.
*/
@Test
public void testReference() throws IOException {
@@ -178,20 +192,18 @@ public class TestHStoreFile extends HBaseTestCase {
HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
// Make a store file and write data to it.
StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
- .withFilePath(regionFs.createTempName())
- .withFileContext(meta)
- .build();
+ .withFilePath(regionFs.createTempName()).withFileContext(meta).build();
writeStoreFile(writer);
Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
HStoreFile hsf = new HStoreFile(this.fs, hsfPath, conf, cacheConf, BloomType.NONE, true);
hsf.initReader();
StoreFileReader reader = hsf.getReader();
- // Split on a row, not in middle of row. Midkey returned by reader
- // may be in middle of row. Create new one with empty column and
+ // Split on a row, not in middle of row. Midkey returned by reader
+ // may be in middle of row. Create new one with empty column and
// timestamp.
- byte [] midRow = CellUtil.cloneRow(reader.midKey().get());
- byte [] finalRow = CellUtil.cloneRow(reader.getLastKey().get());
+ byte[] midRow = CellUtil.cloneRow(reader.midKey().get());
+ byte[] finalRow = CellUtil.cloneRow(reader.getLastKey().get());
hsf.closeStoreFile(true);
// Make a reference
@@ -219,14 +231,14 @@ public class TestHStoreFile extends HBaseTestCase {
@Test
public void testStoreFileReference() throws Exception {
final RegionInfo hri =
- RegionInfoBuilder.newBuilder(TableName.valueOf("testStoreFileReference")).build();
+ RegionInfoBuilder.newBuilder(TableName.valueOf("testStoreFileReference")).build();
HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
new Path(testDir, hri.getTable().getNameAsString()), hri);
HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
// Make a store file and write data to it.
StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
- .withFilePath(regionFs.createTempName()).withFileContext(meta).build();
+ .withFilePath(regionFs.createTempName()).withFileContext(meta).build();
writeStoreFile(writer);
Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
writer.close();
@@ -236,7 +248,7 @@ public class TestHStoreFile extends HBaseTestCase {
StoreFileReader r = file.getReader();
assertNotNull(r);
StoreFileScanner scanner =
- new StoreFileScanner(r, mock(HFileScanner.class), false, false, 0, 0, false);
+ new StoreFileScanner(r, mock(HFileScanner.class), false, false, 0, 0, false);
// Verify after instantiating scanner refCount is increased
assertTrue("Verify file is being referenced", file.isReferencedInReads());
@@ -252,11 +264,12 @@ public class TestHStoreFile extends HBaseTestCase {
byte[] cf = Bytes.toBytes("ty");
ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(cf);
when(store.getColumnFamilyDescriptor()).thenReturn(cfd);
- StoreFileScanner scanner =
- new StoreFileScanner(reader, mock(HFileScanner.class), false, false, 0, 0, true);
- Scan scan = new Scan();
- scan.setColumnFamilyTimeRange(cf, 0, 1);
- assertFalse(scanner.shouldUseScanner(scan, store, 0));
+ try (StoreFileScanner scanner =
+ new StoreFileScanner(reader, mock(HFileScanner.class), false, false, 0, 0, true)) {
+ Scan scan = new Scan();
+ scan.setColumnFamilyTimeRange(cf, 0, 1);
+ assertFalse(scanner.shouldUseScanner(scan, store, 0));
+ }
}
@Test
@@ -266,22 +279,20 @@ public class TestHStoreFile extends HBaseTestCase {
// force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
Configuration testConf = new Configuration(this.conf);
CommonFSUtils.setRootDir(testConf, testDir);
- HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
- testConf, fs, CommonFSUtils.getTableDir(testDir, hri.getTable()), hri);
+ HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(testConf, fs,
+ CommonFSUtils.getTableDir(testDir, hri.getTable()), hri);
HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
// Make a store file and write data to it.
StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
- .withFilePath(regionFs.createTempName())
- .withFileContext(meta)
- .build();
+ .withFilePath(regionFs.createTempName()).withFileContext(meta).build();
writeStoreFile(writer);
Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
Path dstPath = new Path(regionFs.getTableDir(), new Path("test-region", TEST_FAMILY));
HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
- Path linkFilePath = new Path(dstPath,
- HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
+ Path linkFilePath =
+ new Path(dstPath, HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
// Try to open store file from link
StoreFileInfo storeFileInfo = new StoreFileInfo(testConf, this.fs, linkFilePath, true);
@@ -300,8 +311,8 @@ public class TestHStoreFile extends HBaseTestCase {
}
/**
- * This test creates an hfile and then the dir structures and files to verify that references
- * to hfilelinks (created by snapshot clones) can be properly interpreted.
+ * This test creates an hfile and then the dir structures and files to verify that references to
+ * hfilelinks (created by snapshot clones) can be properly interpreted.
*/
@Test
public void testReferenceToHFileLink() throws IOException {
@@ -317,21 +328,18 @@ public class TestHStoreFile extends HBaseTestCase {
HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
// Make a store file and write data to it. <root>/<tablename>/<rgn>/<cf>/<file>
StoreFileWriter writer = new StoreFileWriter.Builder(testConf, cacheConf, this.fs)
- .withFilePath(regionFs.createTempName())
- .withFileContext(meta)
- .build();
+ .withFilePath(regionFs.createTempName()).withFileContext(meta).build();
writeStoreFile(writer);
Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
// create link to store file. <root>/clone/region/<cf>/<hfile>-<region>-<table>
RegionInfo hriClone = RegionInfoBuilder.newBuilder(TableName.valueOf("clone")).build();
- HRegionFileSystem cloneRegionFs = HRegionFileSystem.createRegionOnFileSystem(
- testConf, fs, CommonFSUtils.getTableDir(testDir, hri.getTable()),
- hriClone);
+ HRegionFileSystem cloneRegionFs = HRegionFileSystem.createRegionOnFileSystem(testConf, fs,
+ CommonFSUtils.getTableDir(testDir, hri.getTable()), hriClone);
Path dstPath = cloneRegionFs.getStoreDir(TEST_FAMILY);
HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
- Path linkFilePath = new Path(dstPath,
- HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
+ Path linkFilePath =
+ new Path(dstPath, HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
// create splits of the link.
// <root>/clone/splitA/<cf>/<reftohfilelink>,
@@ -348,7 +356,7 @@ public class TestHStoreFile extends HBaseTestCase {
CommonFSUtils.logFileSystemState(fs, testDir, LOG);
// There is a case where a file with the hfilelink pattern is actually a daughter
- // reference to a hfile link. This code in StoreFile that handles this case.
+ // reference to a hfile link. This is the code in StoreFile that handles this case.
// Try to open store file from link
HStoreFile hsfA = new HStoreFile(this.fs, pathA, testConf, cacheConf, BloomType.NONE, true);
@@ -371,7 +379,7 @@ public class TestHStoreFile extends HBaseTestCase {
HFileScanner sB = hsfB.getReader().getScanner(false, false);
sB.seekTo();
- //count++ as seekTo() will advance the scanner
+ // count++ as seekTo() will advance the scanner
count++;
while (sB.next()) {
count++;
@@ -385,8 +393,8 @@ public class TestHStoreFile extends HBaseTestCase {
throws IOException {
f.initReader();
Cell midkey = f.getReader().midKey().get();
- KeyValue midKV = (KeyValue)midkey;
- byte [] midRow = CellUtil.cloneRow(midKV);
+ KeyValue midKV = (KeyValue) midkey;
+ byte[] midRow = CellUtil.cloneRow(midKV);
// Create top split.
RegionInfo topHri =
RegionInfoBuilder.newBuilder(regionFs.getRegionInfo().getTable()).setEndKey(SPLITKEY).build();
@@ -414,13 +422,12 @@ public class TestHStoreFile extends HBaseTestCase {
ByteBuffer key = null;
HFileScanner topScanner = top.getScanner(false, false);
while ((!topScanner.isSeeked() && topScanner.seekTo()) ||
- (topScanner.isSeeked() && topScanner.next())) {
+ (topScanner.isSeeked() && topScanner.next())) {
key = ByteBuffer.wrap(((KeyValue) topScanner.getKey()).getKey());
if ((PrivateCellUtil.compare(topScanner.getReader().getComparator(), midKV, key.array(),
key.arrayOffset(), key.limit())) > 0) {
- fail("key=" + Bytes.toStringBinary(key) + " < midkey=" +
- midkey);
+ fail("key=" + Bytes.toStringBinary(key) + " < midkey=" + midkey);
}
if (first) {
first = false;
@@ -431,14 +438,12 @@ public class TestHStoreFile extends HBaseTestCase {
first = true;
HFileScanner bottomScanner = bottom.getScanner(false, false);
- while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
- bottomScanner.next()) {
+ while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) {
previous = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey());
key = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey());
if (first) {
first = false;
- LOG.info("First in bottom: " +
- Bytes.toString(Bytes.toBytes(previous)));
+ LOG.info("First in bottom: " + Bytes.toString(Bytes.toBytes(previous)));
}
assertTrue(key.compareTo(bbMidkeyBytes) < 0);
}
@@ -452,7 +457,7 @@ public class TestHStoreFile extends HBaseTestCase {
// Next test using a midkey that does not exist in the file.
// First, do a key that is < than first key. Ensure splits behave
// properly.
- byte [] badmidkey = Bytes.toBytes(" .");
+ byte[] badmidkey = Bytes.toBytes(" .");
assertTrue(fs.exists(f.getPath()));
topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, badmidkey, true);
bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);
@@ -466,8 +471,7 @@ public class TestHStoreFile extends HBaseTestCase {
first = true;
topScanner = top.getScanner(false, false);
KeyValue.KeyOnlyKeyValue keyOnlyKV = new KeyValue.KeyOnlyKeyValue();
- while ((!topScanner.isSeeked() && topScanner.seekTo()) ||
- topScanner.next()) {
+ while ((!topScanner.isSeeked() && topScanner.seekTo()) || topScanner.next()) {
key = ByteBuffer.wrap(((KeyValue) topScanner.getKey()).getKey());
keyOnlyKV.setKey(key.array(), 0 + key.arrayOffset(), key.limit());
assertTrue(PrivateCellUtil.compare(topScanner.getReader().getComparator(), keyOnlyKV,
@@ -477,7 +481,7 @@ public class TestHStoreFile extends HBaseTestCase {
KeyValue keyKV = KeyValueUtil.createKeyValueFromKey(key);
LOG.info("First top when key < bottom: " + keyKV);
String tmp =
- Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
+ Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength());
for (int i = 0; i < tmp.length(); i++) {
assertTrue(tmp.charAt(i) == 'a');
}
@@ -495,7 +499,7 @@ public class TestHStoreFile extends HBaseTestCase {
// Test when badmidkey is > the last key in file ('|||' > 'zz').
badmidkey = Bytes.toBytes("|||");
- topPath = splitStoreFile(regionFs,topHri, TEST_FAMILY, f, badmidkey, true);
+ topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, badmidkey, true);
bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);
assertNull(topPath);
@@ -504,8 +508,7 @@ public class TestHStoreFile extends HBaseTestCase {
bottom = bottomF.getReader();
first = true;
bottomScanner = bottom.getScanner(false, false);
- while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
- bottomScanner.next()) {
+ while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || bottomScanner.next()) {
key = ByteBuffer.wrap(((KeyValue) bottomScanner.getKey()).getKey());
if (first) {
first = false;
@@ -521,7 +524,7 @@ public class TestHStoreFile extends HBaseTestCase {
LOG.info("Last bottom when key > top: " + keyKV);
for (int i = 0; i < tmp.length(); i++) {
assertTrue(Bytes.toString(keyKV.getRowArray(), keyKV.getRowOffset(), keyKV.getRowLength())
- .charAt(i) == 'z');
+ .charAt(i) == 'z');
}
} finally {
if (top != null) {
@@ -535,7 +538,7 @@ public class TestHStoreFile extends HBaseTestCase {
}
private static StoreFileScanner getStoreFileScanner(StoreFileReader reader, boolean cacheBlocks,
- boolean pread) {
+ boolean pread) {
return reader.getStoreFileScanner(cacheBlocks, pread, false, 0, 0, false);
}
@@ -547,8 +550,8 @@ public class TestHStoreFile extends HBaseTestCase {
long now = System.currentTimeMillis();
for (int i = 0; i < 2000; i += 2) {
String row = String.format(localFormatter, i);
- KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"),
- Bytes.toBytes("col"), now, Bytes.toBytes("value"));
+ KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), Bytes.toBytes("col"),
+ now, Bytes.toBytes("value"));
writer.append(kv);
}
writer.close();
@@ -556,7 +559,7 @@ public class TestHStoreFile extends HBaseTestCase {
ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, f).build();
HFileInfo fileInfo = new HFileInfo(context, conf);
StoreFileReader reader =
- new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf);
+ new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf);
fileInfo.initMetaAndIndex(reader.getHFileReader());
reader.loadFileInfo();
reader.loadBloomfilter();
@@ -570,12 +573,11 @@ public class TestHStoreFile extends HBaseTestCase {
TreeSet<byte[]> columns = new TreeSet<>(Bytes.BYTES_COMPARATOR);
columns.add(Bytes.toBytes("family:col"));
- Scan scan = new Scan().withStartRow(Bytes.toBytes(row))
- .withStopRow(Bytes.toBytes(row), true);
+ Scan scan = new Scan().withStartRow(Bytes.toBytes(row)).withStopRow(Bytes.toBytes(row), true);
scan.addColumn(Bytes.toBytes("family"), Bytes.toBytes("family:col"));
HStore store = mock(HStore.class);
when(store.getColumnFamilyDescriptor())
- .thenReturn(ColumnFamilyDescriptorBuilder.of("family"));
+ .thenReturn(ColumnFamilyDescriptorBuilder.of("family"));
boolean exists = scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
if (i % 2 == 0) {
if (!exists) {
@@ -591,60 +593,48 @@ public class TestHStoreFile extends HBaseTestCase {
fs.delete(f, true);
assertEquals("False negatives: " + falseNeg, 0, falseNeg);
int maxFalsePos = (int) (2 * 2000 * err);
- assertTrue("Too many false positives: " + falsePos + " (err=" + err + ", expected no more than "
- + maxFalsePos + ")", falsePos <= maxFalsePos);
+ assertTrue("Too many false positives: " + falsePos + " (err=" + err +
+ ", expected no more than " + maxFalsePos + ")", falsePos <= maxFalsePos);
}
private static final int BLOCKSIZE_SMALL = 8192;
@Test
public void testBloomFilter() throws Exception {
- FileSystem fs = FileSystem.getLocal(conf);
conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, (float) 0.01);
conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);
// write the file
- Path f = new Path(ROOT_DIR, getName());
+ Path f = new Path(ROOT_DIR, name.getMethodName());
HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL)
- .withChecksumType(CKTYPE)
- .withBytesPerCheckSum(CKBYTES).build();
+ .withChecksumType(CKTYPE).withBytesPerCheckSum(CKBYTES).build();
// Make a store file and write data to it.
- StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
- .withFilePath(f)
- .withBloomType(BloomType.ROW)
- .withMaxKeyCount(2000)
- .withFileContext(meta)
- .build();
+ StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs).withFilePath(f)
+ .withBloomType(BloomType.ROW).withMaxKeyCount(2000).withFileContext(meta).build();
bloomWriteRead(writer, fs);
}
@Test
public void testDeleteFamilyBloomFilter() throws Exception {
- FileSystem fs = FileSystem.getLocal(conf);
conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, (float) 0.01);
conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);
float err = conf.getFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0);
// write the file
- Path f = new Path(ROOT_DIR, getName());
+ Path f = new Path(ROOT_DIR, name.getMethodName());
- HFileContext meta = new HFileContextBuilder()
- .withBlockSize(BLOCKSIZE_SMALL)
- .withChecksumType(CKTYPE)
- .withBytesPerCheckSum(CKBYTES).build();
+ HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL)
+ .withChecksumType(CKTYPE).withBytesPerCheckSum(CKBYTES).build();
// Make a store file and write data to it.
- StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
- .withFilePath(f)
- .withMaxKeyCount(2000)
- .withFileContext(meta)
- .build();
+ StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs).withFilePath(f)
+ .withMaxKeyCount(2000).withFileContext(meta).build();
// add delete family
long now = System.currentTimeMillis();
for (int i = 0; i < 2000; i += 2) {
String row = String.format(localFormatter, i);
- KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"),
- Bytes.toBytes("col"), now, KeyValue.Type.DeleteFamily, Bytes.toBytes("value"));
+ KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), Bytes.toBytes("col"),
+ now, KeyValue.Type.DeleteFamily, Bytes.toBytes("value"));
writer.append(kv);
}
writer.close();
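An aside on the false-positive ceiling asserted above: the tests insert every other row in [0, 2000) and then probe all 2000, so only the roughly 1000 absent rows can false-positive, at about the configured error rate. A back-of-the-envelope sketch of that bound, with the 2000 probes and err = 0.01 taken from the tests above:

  // Rough expectation behind "maxFalsePos = (int) (2 * 2000 * err)".
  public class BloomBoundSketch {
    public static void main(String[] args) {
      float err = 0.01f;                       // IO_STOREFILE_BLOOM_ERROR_RATE above
      int probes = 2000;                       // rows probed by the test
      int absent = probes / 2;                 // only absent rows can false-positive
      double expected = absent * err;          // ~10 expected false positives
      int allowed = (int) (2 * probes * err);  // 40, the asserted ceiling
      System.out.println("expected ~" + expected + ", allowed <= " + allowed);
    }
  }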
@@ -652,7 +642,7 @@ public class TestHStoreFile extends HBaseTestCase {
ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, f).build();
HFileInfo fileInfo = new HFileInfo(context, conf);
StoreFileReader reader =
- new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf);
+ new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf);
fileInfo.initMetaAndIndex(reader.getHFileReader());
reader.loadFileInfo();
reader.loadBloomfilter();
@@ -679,8 +669,8 @@ public class TestHStoreFile extends HBaseTestCase {
fs.delete(f, true);
assertEquals("False negatives: " + falseNeg, 0, falseNeg);
int maxFalsePos = (int) (2 * 2000 * err);
- assertTrue("Too many false positives: " + falsePos + " (err=" + err
- + ", expected no more than " + maxFalsePos, falsePos <= maxFalsePos);
+ assertTrue("Too many false positives: " + falsePos + " (err=" + err +
+ ", expected no more than " + maxFalsePos, falsePos <= maxFalsePos);
}
/**
@@ -689,13 +679,11 @@ public class TestHStoreFile extends HBaseTestCase {
@Test
public void testReseek() throws Exception {
// write the file
- Path f = new Path(ROOT_DIR, getName());
+ Path f = new Path(ROOT_DIR, name.getMethodName());
HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
// Make a store file and write data to it.
- StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
- .withFilePath(f)
- .withFileContext(meta)
- .build();
+ StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs).withFilePath(f)
+ .withFileContext(meta).build();
writeStoreFile(writer);
writer.close();
@@ -703,7 +691,7 @@ public class TestHStoreFile extends HBaseTestCase {
ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, f).build();
HFileInfo fileInfo = new HFileInfo(context, conf);
StoreFileReader reader =
- new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf);
+ new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf);
fileInfo.initMetaAndIndex(reader.getHFileReader());
// Now do reseek with empty KV to position to the beginning of the file
@@ -727,51 +715,43 @@ public class TestHStoreFile extends HBaseTestCase {
int versions = 2;
// run once using columns and once using rows
- BloomType[] bt = {BloomType.ROWCOL, BloomType.ROW};
- int[] expKeys = {rowCount*colCount, rowCount};
- // below line deserves commentary. it is expected bloom false positives
- // column = rowCount*2*colCount inserts
- // row-level = only rowCount*2 inserts, but failures will be magnified by
- // 2nd for loop for every column (2*colCount)
- float[] expErr = {2*rowCount*colCount*err, 2*rowCount*2*colCount*err};
-
- for (int x : new int[]{0,1}) {
+ BloomType[] bt = { BloomType.ROWCOL, BloomType.ROW };
+ int[] expKeys = { rowCount * colCount, rowCount };
+ // The lines below deserve commentary. They are the expected bloom false positives:
+ // column-level = rowCount*2*colCount inserts
+ // row-level = only rowCount*2 inserts, but failures will be magnified by the
+ // 2nd for loop for every column (2*colCount)
+ float[] expErr = { 2 * rowCount * colCount * err, 2 * rowCount * 2 * colCount * err };
+
+ for (int x : new int[] { 0, 1 }) {
// write the file
- Path f = new Path(ROOT_DIR, getName() + x);
+ Path f = new Path(ROOT_DIR, name.getMethodName() + x);
HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL)
- .withChecksumType(CKTYPE)
- .withBytesPerCheckSum(CKBYTES).build();
+ .withChecksumType(CKTYPE).withBytesPerCheckSum(CKBYTES).build();
// Make a store file and write data to it.
- StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
- .withFilePath(f)
- .withBloomType(bt[x])
- .withMaxKeyCount(expKeys[x])
- .withFileContext(meta)
- .build();
+ StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs).withFilePath(f)
+ .withBloomType(bt[x]).withMaxKeyCount(expKeys[x]).withFileContext(meta).build();
long now = System.currentTimeMillis();
- for (int i = 0; i < rowCount*2; i += 2) { // rows
- for (int j = 0; j < colCount*2; j += 2) { // column qualifiers
+ for (int i = 0; i < rowCount * 2; i += 2) { // rows
+ for (int j = 0; j < colCount * 2; j += 2) { // column qualifiers
String row = String.format(localFormatter, i);
String col = String.format(localFormatter, j);
- for (int k= 0; k < versions; ++k) { // versions
+ for (int k = 0; k < versions; ++k) { // versions
KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"),
- Bytes.toBytes("col" + col), now-k, Bytes.toBytes(-1L));
+ Bytes.toBytes("col" + col), now - k, Bytes.toBytes(-1L));
writer.append(kv);
}
}
}
writer.close();
- ReaderContext context = new ReaderContextBuilder()
- .withFilePath(f)
- .withFileSize(fs.getFileStatus(f).getLen())
- .withFileSystem(fs)
- .withInputStreamWrapper(new FSDataInputStreamWrapper(fs, f))
- .build();
+ ReaderContext context =
+ new ReaderContextBuilder().withFilePath(f).withFileSize(fs.getFileStatus(f).getLen())
+ .withFileSystem(fs).withInputStreamWrapper(new FSDataInputStreamWrapper(fs, f)).build();
HFileInfo fileInfo = new HFileInfo(context, conf);
StoreFileReader reader =
- new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf);
+ new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf);
fileInfo.initMetaAndIndex(reader.getHFileReader());
reader.loadFileInfo();
reader.loadBloomfilter();
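The expErr arithmetic above encodes why a row-only bloom looks worse in this test: a single row-level false positive is re-counted for each of the 2*colCount column probes. A hedged sketch of the two expectations, with hypothetical rowCount/colCount values standing in for the ones defined earlier in the test:

  public class ExpectedBloomErrSketch {
    public static void main(String[] args) {
      int rowCount = 100, colCount = 10;  // hypothetical; the test defines its own
      float err = 0.01f;
      // ROWCOL bloom: each (row, column) probe is an independent lookup.
      float rowColErr = 2 * rowCount * colCount * err;
      // ROW bloom: only 2*rowCount distinct row keys, but each row-level false
      // positive repeats for every one of the 2*colCount probed columns.
      float rowErr = 2 * rowCount * 2 * colCount * err;
      System.out.println("ROWCOL ~" + rowColErr + ", ROW ~" + rowErr);
    }
  }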
@@ -780,23 +760,22 @@ public class TestHStoreFile extends HBaseTestCase {
HStore store = mock(HStore.class);
when(store.getColumnFamilyDescriptor())
- .thenReturn(ColumnFamilyDescriptorBuilder.of("family"));
+ .thenReturn(ColumnFamilyDescriptorBuilder.of("family"));
// check false positives rate
int falsePos = 0;
int falseNeg = 0;
- for (int i = 0; i < rowCount*2; ++i) { // rows
- for (int j = 0; j < colCount*2; ++j) { // column qualifiers
+ for (int i = 0; i < rowCount * 2; ++i) { // rows
+ for (int j = 0; j < colCount * 2; ++j) { // column qualifiers
String row = String.format(localFormatter, i);
String col = String.format(localFormatter, j);
TreeSet<byte[]> columns = new TreeSet<>(Bytes.BYTES_COMPARATOR);
columns.add(Bytes.toBytes("col" + col));
- Scan scan = new Scan().withStartRow(Bytes.toBytes(row))
- .withStopRow(Bytes.toBytes(row), true);
- scan.addColumn(Bytes.toBytes("family"), Bytes.toBytes(("col"+col)));
+ Scan scan =
+ new Scan().withStartRow(Bytes.toBytes(row)).withStopRow(Bytes.toBytes(row), true);
+ scan.addColumn(Bytes.toBytes("family"), Bytes.toBytes(("col" + col)));
- boolean exists =
- scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
+ boolean exists = scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
boolean shouldRowExist = i % 2 == 0;
boolean shouldColExist = j % 2 == 0;
shouldColExist = shouldColExist || bt[x] == BloomType.ROW;
@@ -817,25 +796,24 @@ public class TestHStoreFile extends HBaseTestCase {
System.out.println(" False negatives: " + falseNeg);
System.out.println(" False positives: " + falsePos);
assertEquals(0, falseNeg);
- assertTrue(falsePos < 2*expErr[x]);
+ assertTrue(falsePos < 2 * expErr[x]);
}
}
@Test
public void testSeqIdComparator() {
assertOrdering(StoreFileComparators.SEQ_ID, mockStoreFile(true, 100, 1000, -1, "/foo/123"),
- mockStoreFile(true, 100, 1000, -1, "/foo/124"),
- mockStoreFile(true, 99, 1000, -1, "/foo/126"),
- mockStoreFile(true, 98, 2000, -1, "/foo/126"), mockStoreFile(false, 3453, -1, 1, "/foo/1"),
- mockStoreFile(false, 2, -1, 3, "/foo/2"), mockStoreFile(false, 1000, -1, 5, "/foo/2"),
- mockStoreFile(false, 76, -1, 5, "/foo/3"));
+ mockStoreFile(true, 100, 1000, -1, "/foo/124"), mockStoreFile(true, 99, 1000, -1, "/foo/126"),
+ mockStoreFile(true, 98, 2000, -1, "/foo/126"), mockStoreFile(false, 3453, -1, 1, "/foo/1"),
+ mockStoreFile(false, 2, -1, 3, "/foo/2"), mockStoreFile(false, 1000, -1, 5, "/foo/2"),
+ mockStoreFile(false, 76, -1, 5, "/foo/3"));
}
/**
- * Assert that the given comparator orders the given storefiles in the
- * same way that they're passed.
+ * Assert that the given comparator orders the given storefiles in the same way that they're
+ * passed.
*/
- private void assertOrdering(Comparator<? super HStoreFile> comparator, HStoreFile ... sfs) {
+ private void assertOrdering(Comparator<? super HStoreFile> comparator, HStoreFile... sfs) {
ArrayList<HStoreFile> sorted = Lists.newArrayList(sfs);
Collections.shuffle(sorted);
Collections.sort(sorted, comparator);
@@ -847,11 +825,8 @@ public class TestHStoreFile extends HBaseTestCase {
/**
* Create a mock StoreFile with the given attributes.
*/
- private HStoreFile mockStoreFile(boolean bulkLoad,
- long size,
- long bulkTimestamp,
- long seqId,
- String path) {
+ private HStoreFile mockStoreFile(boolean bulkLoad, long size, long bulkTimestamp, long seqId,
+ String path) {
HStoreFile mock = Mockito.mock(HStoreFile.class);
StoreFileReader reader = Mockito.mock(StoreFileReader.class);
@@ -862,10 +837,8 @@ public class TestHStoreFile extends HBaseTestCase {
Mockito.doReturn(OptionalLong.of(bulkTimestamp)).when(mock).getBulkLoadTimestamp();
Mockito.doReturn(seqId).when(mock).getMaxSequenceId();
Mockito.doReturn(new Path(path)).when(mock).getPath();
- String name = "mock storefile, bulkLoad=" + bulkLoad +
- " bulkTimestamp=" + bulkTimestamp +
- " seqId=" + seqId +
- " path=" + path;
+ String name = "mock storefile, bulkLoad=" + bulkLoad + " bulkTimestamp=" + bulkTimestamp +
+ " seqId=" + seqId + " path=" + path;
Mockito.doReturn(name).when(mock).toString();
return mock;
}
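mockStoreFile above uses Mockito's doReturn(...).when(mock).method() form throughout, and for toString() that form is effectively mandatory, since when(mock.toString()) would invoke the real method during stubbing. A self-contained sketch of the same pattern (the Widget type is a made-up stand-in, not an HBase class):

  import org.mockito.Mockito;

  public class DoReturnSketch {
    interface Widget { long size(); }

    public static void main(String[] args) {
      Widget mock = Mockito.mock(Widget.class);
      // doReturn(..).when(mock).method() stubs without calling the real method,
      // which is why it also works for Object methods like toString().
      Mockito.doReturn(42L).when(mock).size();
      Mockito.doReturn("mock widget, size=42").when(mock).toString();
      System.out.println(mock + " -> " + mock.size());
    }
  }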
@@ -874,14 +847,13 @@ public class TestHStoreFile extends HBaseTestCase {
* Generate a list of KeyValues for testing based on given parameters
* @return the rows key-value list
*/
- List<KeyValue> getKeyValueSet(long[] timestamps, int numRows,
- byte[] qualifier, byte[] family) {
+ List<KeyValue> getKeyValueSet(long[] timestamps, int numRows, byte[] qualifier, byte[] family) {
List<KeyValue> kvList = new ArrayList<>();
- for (int i=1;i<=numRows;i++) {
- byte[] b = Bytes.toBytes(i) ;
+ for (int i = 1; i <= numRows; i++) {
+ byte[] b = Bytes.toBytes(i);
LOG.info(Bytes.toString(b));
LOG.info(Bytes.toString(b));
- for (long timestamp: timestamps) {
+ for (long timestamp : timestamps) {
kvList.add(new KeyValue(b, family, qualifier, timestamp, b));
}
}
@@ -896,7 +868,7 @@ public class TestHStoreFile extends HBaseTestCase {
byte[] family = Bytes.toBytes("familyname");
byte[] qualifier = Bytes.toBytes("qualifier");
int numRows = 10;
- long[] timestamps = new long[] {20,10,5,1};
+ long[] timestamps = new long[] { 20, 10, 5, 1 };
Scan scan = new Scan();
// Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
@@ -905,12 +877,9 @@ public class TestHStoreFile extends HBaseTestCase {
HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
// Make a store file and write data to it.
StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
- .withOutputDir(dir)
- .withFileContext(meta)
- .build();
+ .withOutputDir(dir).withFileContext(meta).build();
- List<KeyValue> kvList = getKeyValueSet(timestamps,numRows,
- qualifier, family);
+ List<KeyValue> kvList = getKeyValueSet(timestamps, numRows, qualifier, family);
for (KeyValue kv : kvList) {
writer.append(kv);
@@ -918,8 +887,8 @@ public class TestHStoreFile extends HBaseTestCase {
writer.appendMetadata(0, false);
writer.close();
- HStoreFile hsf = new HStoreFile(this.fs, writer.getPath(), conf, cacheConf,
- BloomType.NONE, true);
+ HStoreFile hsf =
+ new HStoreFile(this.fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true);
HStore store = mock(HStore.class);
when(store.getColumnFamilyDescriptor()).thenReturn(ColumnFamilyDescriptorBuilder.of(family));
hsf.initReader();
@@ -958,7 +927,7 @@ public class TestHStoreFile extends HBaseTestCase {
Configuration conf = this.conf;
// Find a home for our files (regiondir ("7e0102") and familyname).
- Path baseDir = new Path(new Path(testDir, "7e0102"),"twoCOWEOC");
+ Path baseDir = new Path(new Path(testDir, "7e0102"), "twoCOWEOC");
// Grab the block cache and get the initial hit/miss counts
BlockCache bc = BlockCacheFactory.createBlockCache(conf);
@@ -973,8 +942,8 @@ public class TestHStoreFile extends HBaseTestCase {
CacheConfig cacheConf = new CacheConfig(conf, bc);
Path pathCowOff = new Path(baseDir, "123456789");
StoreFileWriter writer = writeStoreFile(conf, cacheConf, pathCowOff, 3);
- HStoreFile hsf = new HStoreFile(this.fs, writer.getPath(), conf, cacheConf,
- BloomType.NONE, true);
+ HStoreFile hsf =
+ new HStoreFile(this.fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true);
LOG.debug(hsf.getPath().toString());
// Read this file, we should see 3 misses
@@ -998,8 +967,7 @@ public class TestHStoreFile extends HBaseTestCase {
cacheConf = new CacheConfig(conf, bc);
Path pathCowOn = new Path(baseDir, "123456788");
writer = writeStoreFile(conf, cacheConf, pathCowOn, 3);
- hsf = new HStoreFile(this.fs, writer.getPath(), conf, cacheConf,
- BloomType.NONE, true);
+ hsf = new HStoreFile(this.fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true);
// Read this file, we should see 3 hits
hsf.initReader();
@@ -1036,12 +1004,10 @@ public class TestHStoreFile extends HBaseTestCase {
assertTrue(kv1.equals(kv2));
KeyValue keyv1 = KeyValueUtil.ensureKeyValue(kv1);
KeyValue keyv2 = KeyValueUtil.ensureKeyValue(kv2);
- assertTrue(Bytes.compareTo(
- keyv1.getBuffer(), keyv1.getKeyOffset(), keyv1.getKeyLength(),
- keyv2.getBuffer(), keyv2.getKeyOffset(), keyv2.getKeyLength()) == 0);
- assertTrue(Bytes.compareTo(
- kv1.getValueArray(), kv1.getValueOffset(), kv1.getValueLength(),
- kv2.getValueArray(), kv2.getValueOffset(), kv2.getValueLength()) == 0);
+ assertTrue(Bytes.compareTo(keyv1.getBuffer(), keyv1.getKeyOffset(), keyv1.getKeyLength(),
+ keyv2.getBuffer(), keyv2.getKeyOffset(), keyv2.getKeyLength()) == 0);
+ assertTrue(Bytes.compareTo(kv1.getValueArray(), kv1.getValueOffset(), kv1.getValueLength(),
+ kv2.getValueArray(), kv2.getValueOffset(), kv2.getValueLength()) == 0);
}
assertNull(scannerTwo.next());
assertEquals(startHit + 6, cs.getHitCount());
@@ -1082,9 +1048,8 @@ public class TestHStoreFile extends HBaseTestCase {
}
private Path splitStoreFile(final HRegionFileSystem regionFs, final RegionInfo hri,
- final String family, final HStoreFile sf, final byte[] splitKey, boolean isTopRef)
- throws IOException {
- FileSystem fs = regionFs.getFileSystem();
+ final String family, final HStoreFile sf, final byte[] splitKey, boolean isTopRef)
+ throws IOException {
Path path = regionFs.splitStoreFile(hri, family, sf, splitKey, isTopRef, null);
if (null == path) {
return null;
@@ -1094,31 +1059,26 @@ public class TestHStoreFile extends HBaseTestCase {
}
private StoreFileWriter writeStoreFile(Configuration conf, CacheConfig cacheConf, Path path,
- int numBlocks) throws IOException {
+ int numBlocks) throws IOException {
// Let's put ~5 small KVs in each block, so let's make 5*numBlocks KVs
int numKVs = 5 * numBlocks;
List<KeyValue> kvs = new ArrayList<>(numKVs);
- byte [] b = Bytes.toBytes("x");
+ byte[] b = Bytes.toBytes("x");
int totalSize = 0;
- for (int i=numKVs;i>0;i--) {
+ for (int i = numKVs; i > 0; i--) {
KeyValue kv = new KeyValue(b, b, b, i, b);
kvs.add(kv);
// kv has memstoreTS 0, which takes 1 byte to store.
totalSize += kv.getLength() + 1;
}
int blockSize = totalSize / numBlocks;
- HFileContext meta = new HFileContextBuilder().withBlockSize(blockSize)
- .withChecksumType(CKTYPE)
- .withBytesPerCheckSum(CKBYTES)
- .build();
+ HFileContext meta = new HFileContextBuilder().withBlockSize(blockSize).withChecksumType(CKTYPE)
+ .withBytesPerCheckSum(CKBYTES).build();
// Make a store file and write data to it.
StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
- .withFilePath(path)
- .withMaxKeyCount(2000)
- .withFileContext(meta)
- .build();
+ .withFilePath(path).withMaxKeyCount(2000).withFileContext(meta).build();
// We'll write N-1 KVs to ensure we don't write an extra block
- kvs.remove(kvs.size()-1);
+ kvs.remove(kvs.size() - 1);
for (KeyValue kv : kvs) {
writer.append(kv);
}
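The sizing logic in writeStoreFile is worth spelling out: each cell costs its serialized length plus one byte for the memstoreTS of 0, the block size is total bytes over the desired block count, and one cell is held back so the writer does not spill into an extra block. The arithmetic in isolation, with an illustrative cell length:

  public class BlockSizingSketch {
    public static void main(String[] args) {
      int numBlocks = 4;
      int numKVs = 5 * numBlocks;               // ~5 small KVs per block
      int kvLength = 37;                        // hypothetical serialized KV length
      int totalSize = numKVs * (kvLength + 1);  // +1 byte stores memstoreTS 0
      int blockSize = totalSize / numBlocks;
      // Writing numKVs - 1 cells keeps the writer from starting an extra block.
      System.out.println("blockSize=" + blockSize + ", cellsWritten=" + (numKVs - 1));
    }
  }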
@@ -1128,8 +1088,7 @@ public class TestHStoreFile extends HBaseTestCase {
}
/**
- * Check if data block encoding information is saved correctly in HFile's
- * file info.
+ * Check if data block encoding information is saved correctly in HFile's file info.
*/
@Test
public void testDataBlockEncodingMetaData() throws IOException {
@@ -1137,32 +1096,23 @@ public class TestHStoreFile extends HBaseTestCase {
Path dir = new Path(new Path(testDir, "7e0102"), "familyname");
Path path = new Path(dir, "1234567890");
- DataBlockEncoding dataBlockEncoderAlgo =
- DataBlockEncoding.FAST_DIFF;
- HFileDataBlockEncoder dataBlockEncoder =
- new HFileDataBlockEncoderImpl(
- dataBlockEncoderAlgo);
+ DataBlockEncoding dataBlockEncoderAlgo = DataBlockEncoding.FAST_DIFF;
cacheConf = new CacheConfig(conf);
- HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL)
- .withChecksumType(CKTYPE)
- .withBytesPerCheckSum(CKBYTES)
- .withDataBlockEncoding(dataBlockEncoderAlgo)
- .build();
+ HFileContext meta =
+ new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).withChecksumType(CKTYPE)
+ .withBytesPerCheckSum(CKBYTES).withDataBlockEncoding(dataBlockEncoderAlgo).build();
// Make a store file and write data to it.
StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
- .withFilePath(path)
- .withMaxKeyCount(2000)
- .withFileContext(meta)
- .build();
+ .withFilePath(path).withMaxKeyCount(2000).withFileContext(meta).build();
writer.close();
HStoreFile storeFile =
- new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true);
+ new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true);
storeFile.initReader();
StoreFileReader reader = storeFile.getReader();
Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);
- assertEquals(dataBlockEncoderAlgo.getNameInBytes(), value);
+ assertArrayEquals(dataBlockEncoderAlgo.getNameInBytes(), value);
}
}
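The final hunk in TestHStoreFile is a real correctness fix, not just formatting: JUnit's assertEquals(Object, Object) compares byte[] by reference, so assertArrayEquals is the right assertion for array contents. A two-assertion illustration (values hypothetical):

  import static org.junit.Assert.assertArrayEquals;
  import static org.junit.Assert.assertEquals;

  public class ByteArrayAssertSketch {
    public static void main(String[] args) {
      byte[] a = "FAST_DIFF".getBytes();
      byte[] b = "FAST_DIFF".getBytes();
      assertArrayEquals(a, b);   // passes: element-wise comparison
      try {
        assertEquals(a, b);      // fails: two distinct array references
      } catch (AssertionError expected) {
        System.out.println("assertEquals on byte[] compares identity");
      }
    }
  }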
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java
index 839bcaa..a91bdd6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java
@@ -17,6 +17,10 @@
*/
package org.apache.hadoop.hbase.regionserver;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
@@ -24,23 +28,21 @@ import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CollectionBackedScanner;
-import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
-@Category({RegionServerTests.class, SmallTests.class})
-public class TestKeyValueHeap extends HBaseTestCase {
+@Category({ RegionServerTests.class, SmallTests.class })
+public class TestKeyValueHeap {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestKeyValueHeap.class);
+ HBaseClassTestRule.forClass(TestKeyValueHeap.class);
private byte[] row1 = Bytes.toBytes("row1");
private byte[] fam1 = Bytes.toBytes("fam1");
@@ -74,67 +76,59 @@ public class TestKeyValueHeap extends HBaseTestCase {
List<KeyValueScanner> scanners = new ArrayList<>(Arrays.asList(s1, s2, s3));
/*
- * Uses {@code scanners} to build a KeyValueHeap, iterates over it and asserts that returned
- * Cells are same as {@code expected}.
+ * Uses {@code scanners} to build a KeyValueHeap, iterates over it and asserts that the returned
+ * Cells are the same as {@code expected}.
* @return List of Cells returned from scanners.
*/
public List<Cell> assertCells(List<Cell> expected, List<KeyValueScanner> scanners)
- throws IOException {
- //Creating KeyValueHeap
- KeyValueHeap kvh = new KeyValueHeap(scanners, CellComparatorImpl.COMPARATOR);
+ throws IOException {
+ // Creating KeyValueHeap
+ try (KeyValueHeap kvh = new KeyValueHeap(scanners, CellComparatorImpl.COMPARATOR)) {
+ List<Cell> actual = new ArrayList<>();
+ while (kvh.peek() != null) {
+ actual.add(kvh.next());
+ }
- List<Cell> actual = new ArrayList<>();
- while(kvh.peek() != null){
- actual.add(kvh.next());
+ assertEquals(expected, actual);
+ return actual;
}
-
- assertEquals(expected, actual);
- return actual;
- }
-
- @Override
- @Before
- public void setUp() throws Exception {
- super.setUp();
}
@Test
- public void testSorted() throws IOException{
- //Cases that need to be checked are:
- //1. The "smallest" Cell is in the same scanners as current
- //2. Current scanner gets empty
+ public void testSorted() throws IOException {
+ // Cases that need to be checked are:
+ // 1. The "smallest" Cell is in the same scanners as current
+ // 2. Current scanner gets empty
- List<Cell> expected = Arrays.asList(
- kv111, kv112, kv113, kv114, kv115, kv121, kv122, kv211, kv212, kv213);
+ List<Cell> expected =
+ Arrays.asList(kv111, kv112, kv113, kv114, kv115, kv121, kv122, kv211, kv212, kv213);
List<Cell> actual = assertCells(expected, scanners);
- //Check if result is sorted according to Comparator
- for(int i=0; i<actual.size()-1; i++){
- int ret = CellComparatorImpl.COMPARATOR.compare(actual.get(i), actual.get(i+1));
+ // Check if result is sorted according to Comparator
+ for (int i = 0; i < actual.size() - 1; i++) {
+ int ret = CellComparatorImpl.COMPARATOR.compare(actual.get(i), actual.get(i + 1));
assertTrue(ret < 0);
}
}
@Test
public void testSeek() throws IOException {
- //Cases:
- //1. Seek Cell that is not in scanner
- //2. Check that smallest that is returned from a seek is correct
-
+ // Cases:
+ // 1. Seek Cell that is not in scanner
+ // 2. Check that smallest that is returned from a seek is correct
List<Cell> expected = Arrays.asList(kv211);
- //Creating KeyValueHeap
- KeyValueHeap kvh =
- new KeyValueHeap(scanners, CellComparatorImpl.COMPARATOR);
-
- Cell seekKv = new KeyValue(row2, fam1, null, null);
- kvh.seek(seekKv);
+ // Creating KeyValueHeap
+ try (KeyValueHeap kvh = new KeyValueHeap(scanners, CellComparatorImpl.COMPARATOR)) {
+ Cell seekKv = new KeyValue(row2, fam1, null, null);
+ kvh.seek(seekKv);
- List<Cell> actual = Arrays.asList(kvh.peek());
+ List<Cell> actual = Arrays.asList(kvh.peek());
- assertEquals("Expected = " + Arrays.toString(expected.toArray())
- + "\n Actual = " + Arrays.toString(actual.toArray()), expected, actual);
+ assertEquals("Expected = " + Arrays.toString(expected.toArray()) + "\n Actual = " +
+ Arrays.toString(actual.toArray()), expected, actual);
+ }
}
@Test
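The assertCells and testSeek rewrites above move KeyValueHeap into try-with-resources, so the heap closes even when an assertion fails mid-iteration. The shape of that change reduced to plain Java (the Heap type below is a stand-in, not the HBase class):

  import java.util.ArrayList;
  import java.util.List;

  public class TryWithResourcesSketch {
    // Stand-in for KeyValueHeap: anything AutoCloseable works with try-with-resources.
    static class Heap implements AutoCloseable {
      private int remaining = 3;
      String next() { return remaining-- > 0 ? "cell" + remaining : null; }
      @Override public void close() { System.out.println("heap closed"); }
    }

    public static void main(String[] args) {
      List<String> actual = new ArrayList<>();
      try (Heap kvh = new Heap()) {  // closed automatically, even on throw
        for (String cell; (cell = kvh.next()) != null; ) {
          actual.add(cell);
        }
      }
      System.out.println(actual);
    }
  }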
@@ -144,20 +138,25 @@ public class TestKeyValueHeap extends HBaseTestCase {
TestScanner s4 = new TestScanner(new ArrayList<>());
scanners.add(s4);
- //Creating KeyValueHeap
- KeyValueHeap kvh = new KeyValueHeap(scanners, CellComparatorImpl.COMPARATOR);
-
- while(kvh.next() != null);
- // Once the internal scanners go out of Cells, those will be removed from KVHeap's priority
- // queue and added to a Set for lazy close. The actual close will happen only on KVHeap#close()
- assertEquals(4, kvh.scannersForDelayedClose.size());
- assertTrue(kvh.scannersForDelayedClose.contains(s1));
- assertTrue(kvh.scannersForDelayedClose.contains(s2));
- assertTrue(kvh.scannersForDelayedClose.contains(s3));
- assertTrue(kvh.scannersForDelayedClose.contains(s4));
- kvh.close();
- for(KeyValueScanner scanner : scanners) {
- assertTrue(((TestScanner)scanner).isClosed());
+ // Creating KeyValueHeap
+ try (KeyValueHeap kvh = new KeyValueHeap(scanners, CellComparatorImpl.COMPARATOR)) {
+ for (;;) {
+ if (kvh.next() == null) {
+ break;
+ }
+ }
+ // Once the internal scanners go out of Cells, those will be removed from KVHeap's priority
+ // queue and added to a Set for lazy close. The actual close will happen only on
+ // KVHeap#close()
+ assertEquals(4, kvh.scannersForDelayedClose.size());
+ assertTrue(kvh.scannersForDelayedClose.contains(s1));
+ assertTrue(kvh.scannersForDelayedClose.contains(s2));
+ assertTrue(kvh.scannersForDelayedClose.contains(s3));
+ assertTrue(kvh.scannersForDelayedClose.contains(s4));
+ }
+
+ for (KeyValueScanner scanner : scanners) {
+ assertTrue(((TestScanner) scanner).isClosed());
}
}
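The comment retained above spells out KeyValueHeap's lazy-close contract: a scanner that runs out of cells leaves the priority queue at once but is merely parked for delayed close, and the real close happens only when the heap itself closes. A toy model of that contract, under those stated assumptions and with stand-in types rather than the HBase classes:

  import java.util.ArrayDeque;
  import java.util.ArrayList;
  import java.util.Deque;
  import java.util.List;

  public class LazyCloseSketch {
    static class Scanner {
      final Deque<String> cells;
      boolean closed;
      Scanner(String... cs) { cells = new ArrayDeque<>(List.of(cs)); }
      void close() { closed = true; }
    }

    static class Heap implements AutoCloseable {
      final Deque<Scanner> queue = new ArrayDeque<>();
      final List<Scanner> delayedClose = new ArrayList<>();
      Heap(Scanner... ss) { for (Scanner s : ss) queue.add(s); }
      String next() {
        while (!queue.isEmpty()) {
          if (queue.peek().cells.isEmpty()) {
            delayedClose.add(queue.poll());  // exhausted: park it, close later
          } else {
            return queue.peek().cells.poll();
          }
        }
        return null;
      }
      @Override public void close() {        // the actual close happens here
        delayedClose.forEach(Scanner::close);
        queue.forEach(Scanner::close);
      }
    }

    public static void main(String[] args) {
      Scanner s1 = new Scanner("a"), s2 = new Scanner();
      try (Heap h = new Heap(s1, s2)) {
        while (h.next() != null) { }
        System.out.println("parked for delayed close: " + h.delayedClose.size());
      }
      System.out.println("s1 closed=" + s1.closed + ", s2 closed=" + s2.closed);
    }
  }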
@@ -173,19 +172,19 @@ public class TestKeyValueHeap extends HBaseTestCase {
List<KeyValueScanner> scanners = new ArrayList<>(Arrays.asList(s1, s2, s3, s4));
// Creating KeyValueHeap
- KeyValueHeap kvh = new KeyValueHeap(scanners, CellComparatorImpl.COMPARATOR);
-
- try {
+ try (KeyValueHeap kvh = new KeyValueHeap(scanners, CellComparatorImpl.COMPARATOR)) {
for (KeyValueScanner scanner : scanners) {
((SeekTestScanner) scanner).setRealSeekDone(false);
}
- while (kvh.next() != null);
// The pollRealKV should throw IOE.
- assertTrue(false);
- } catch (IOException ioe) {
- kvh.close();
+ assertThrows(IOException.class, () -> {
+ for (;;) {
+ if (kvh.next() == null) {
+ break;
+ }
+ }
+ });
}
-
// Getting here implies there is no NPE thrown from kvh.close()
for (KeyValueScanner scanner : scanners) {
// Verify that close is called and only called once for each scanner
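The rewrite above trades the try/while/fail/catch idiom for JUnit 4.13's assertThrows, which asserts the exception type and composes cleanly with try-with-resources. The idiom in isolation (the failing method here is a hypothetical stand-in for pollRealKV):

  import static org.junit.Assert.assertThrows;

  import java.io.IOException;

  public class AssertThrowsSketch {
    static void pollRealKV() throws IOException {
      throw new IOException("simulated seek failure");
    }

    public static void main(String[] args) {
      // assertThrows returns the thrown exception, so its message can be checked too.
      IOException ioe = assertThrows(IOException.class, () -> pollRealKV());
      System.out.println("caught as expected: " + ioe.getMessage());
    }
  }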
@@ -198,18 +197,15 @@ public class TestKeyValueHeap extends HBaseTestCase {
public void testPriorityId() throws IOException {
Cell kv113A = new KeyValue(row1, fam1, col3, Bytes.toBytes("aaa"));
Cell kv113B = new KeyValue(row1, fam1, col3, Bytes.toBytes("bbb"));
- {
- TestScanner scan1 = new TestScanner(Arrays.asList(kv111, kv112, kv113A), 1);
- TestScanner scan2 = new TestScanner(Arrays.asList(kv113B), 2);
- List<Cell> expected = Arrays.asList(kv111, kv112, kv113B, kv113A);
- assertCells(expected, new ArrayList<>(Arrays.asList(scan1, scan2)));
- }
- {
- TestScanner scan1 = new TestScanner(Arrays.asList(kv111, kv112, kv113A), 2);
- TestScanner scan2 = new TestScanner(Arrays.asList(kv113B), 1);
- List<Cell> expected = Arrays.asList(kv111, kv112, kv113A, kv113B);
- assertCells(expected, new ArrayList<>(Arrays.asList(scan1, scan2)));
- }
+ TestScanner scan1 = new TestScanner(Arrays.asList(kv111, kv112, kv113A), 1);
+ TestScanner scan2 = new TestScanner(Arrays.asList(kv113B), 2);
+ List<Cell> expected = Arrays.asList(kv111, kv112, kv113B, kv113A);
+ assertCells(expected, Arrays.asList(scan1, scan2));
+
+ scan1 = new TestScanner(Arrays.asList(kv111, kv112, kv113A), 2);
+ scan2 = new TestScanner(Arrays.asList(kv113B), 1);
+ expected = Arrays.asList(kv111, kv112, kv113A, kv113B);
+ assertCells(expected, Arrays.asList(scan1, scan2));
}
private static class TestScanner extends CollectionBackedScanner {
@@ -231,7 +227,7 @@ public class TestKeyValueHeap extends HBaseTestCase {
}
@Override
- public void close(){
+ public void close() {
closed = true;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
index 7db7741..59a2413 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
@@ -37,11 +37,11 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HTestConst;
import org.apache.hadoop.hbase.KeepDeletedCells;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete;
@@ -76,28 +76,30 @@ import org.slf4j.LoggerFactory;
/**
* Test major compactions
*/
-@Category({RegionServerTests.class, LargeTests.class})
+@Category({ RegionServerTests.class, LargeTests.class })
@RunWith(Parameterized.class)
public class TestMajorCompaction {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestMajorCompaction.class);
+ HBaseClassTestRule.forClass(TestMajorCompaction.class);
@Parameterized.Parameters
public static Object[] data() {
return new Object[] { "NONE", "BASIC", "EAGER" };
}
- @Rule public TestName name;
+
+ @Rule
+ public TestName name;
private static final Logger LOG = LoggerFactory.getLogger(TestMajorCompaction.class.getName());
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
protected Configuration conf = UTIL.getConfiguration();
private HRegion r = null;
private HTableDescriptor htd = null;
- private static final byte [] COLUMN_FAMILY = fam1;
- private final byte [] STARTROW = Bytes.toBytes(START_KEY);
- private static final byte [] COLUMN_FAMILY_TEXT = COLUMN_FAMILY;
+ private static final byte[] COLUMN_FAMILY = fam1;
+ private final byte[] STARTROW = Bytes.toBytes(START_KEY);
+ private static final byte[] COLUMN_FAMILY_TEXT = COLUMN_FAMILY;
private int compactionThreshold;
private byte[] secondRowBytes, thirdRowBytes;
private static final long MAX_FILES_TO_COMPACT = 10;
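TestMajorCompaction runs once per compacting-memstore type through JUnit's Parameterized runner: data() supplies the parameter values and the public constructor receives one per run. A minimal sketch of the same wiring, with the parameter values mirrored from above and a trivial body:

  import static org.junit.Assert.assertNotNull;

  import org.junit.Test;
  import org.junit.runner.RunWith;
  import org.junit.runners.Parameterized;

  @RunWith(Parameterized.class)
  public class ParameterizedSketch {
    @Parameterized.Parameters
    public static Object[] data() {
      return new Object[] { "NONE", "BASIC", "EAGER" };
    }

    private final String compType;

    // The runner invokes this constructor once for each element of data().
    public ParameterizedSketch(String compType) {
      this.compType = compType;
    }

    @Test
    public void runsOncePerMemstoreType() {
      assertNotNull(compType);
    }
  }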
@@ -107,7 +109,7 @@ public class TestMajorCompaction {
super();
name = new TestName();
// Set cache flush size to 1MB
- conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
+ conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024);
conf.setInt(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, 100);
compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);
conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(compType));
@@ -117,13 +119,13 @@ public class TestMajorCompaction {
secondRowBytes[START_KEY_BYTES.length - 1]++;
thirdRowBytes = START_KEY_BYTES.clone();
thirdRowBytes[START_KEY_BYTES.length - 1] =
- (byte) (thirdRowBytes[START_KEY_BYTES.length - 1] + 2);
+ (byte) (thirdRowBytes[START_KEY_BYTES.length - 1] + 2);
}
@Before
public void setUp() throws Exception {
this.htd = UTIL.createTableDescriptor(
- TableName.valueOf(name.getMethodName().replace('[','i').replace(']','i')),
+ TableName.valueOf(name.getMethodName().replace('[', 'i').replace(']', 'i')),
HColumnDescriptor.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER,
HColumnDescriptor.DEFAULT_KEEP_DELETED);
this.r = UTIL.createLocalHRegion(htd, null, null);
@@ -131,15 +133,15 @@ public class TestMajorCompaction {
@After
public void tearDown() throws Exception {
- WAL wal = ((HRegion)r).getWAL();
- ((HRegion)r).close();
+ WAL wal = ((HRegion) r).getWAL();
+ ((HRegion) r).close();
wal.close();
}
/**
- * Test that on a major compaction, if all cells are expired or deleted, then
- * we'll end up with no product. Make sure scanner over region returns
- * right answer in this case - and that it just basically works.
+ * Test that on a major compaction, if all cells are expired or deleted, then we'll end up with no
+ * product. Make sure a scanner over the region returns the right answer in this case - and that it
+ * just basically works.
* @throws IOException exception encountered
*/
@Test
@@ -157,8 +159,7 @@ public class TestMajorCompaction {
}
/**
- * Run compaction and flushing memstore
- * Assert deletes get cleaned up.
+ * Run compaction and flush the memstore. Assert deletes get cleaned up.
* @throws Exception
*/
@Test
@@ -176,23 +177,21 @@ public class TestMajorCompaction {
majorCompactionWithDataBlockEncoding(false);
}
- public void majorCompactionWithDataBlockEncoding(boolean inCacheOnly)
- throws Exception {
+ public void majorCompactionWithDataBlockEncoding(boolean inCacheOnly) throws Exception {
Map<HStore, HFileDataBlockEncoder> replaceBlockCache = new HashMap<>();
for (HStore store : r.getStores()) {
HFileDataBlockEncoder blockEncoder = store.getDataBlockEncoder();
replaceBlockCache.put(store, blockEncoder);
final DataBlockEncoding inCache = DataBlockEncoding.PREFIX;
- final DataBlockEncoding onDisk = inCacheOnly ? DataBlockEncoding.NONE :
- inCache;
- ((HStore)store).setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(onDisk));
+ final DataBlockEncoding onDisk = inCacheOnly ? DataBlockEncoding.NONE : inCache;
+ ((HStore) store).setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(onDisk));
}
majorCompaction();
// restore settings
for (Entry<HStore, HFileDataBlockEncoder> entry : replaceBlockCache.entrySet()) {
- ((HStore)entry.getKey()).setDataBlockEncoderInTest(entry.getValue());
+ ((HStore) entry.getKey()).setDataBlockEncoderInTest(entry.getValue());
}
}
@@ -202,7 +201,7 @@ public class TestMajorCompaction {
createStoreFile(r);
}
// Add more content.
- HBaseTestCase.addContent(new RegionAsTable(r), Bytes.toString(COLUMN_FAMILY));
+ HTestConst.addContent(new RegionAsTable(r), Bytes.toString(COLUMN_FAMILY));
// Now there are about 5 versions of each column.
// Default is that there are only 3 (MAXVERSIONS) versions allowed per column.
@@ -223,7 +222,7 @@ public class TestMajorCompaction {
int storeCount = 0;
for (HStore store : r.getStores()) {
CompactionProgress progress = store.getCompactionProgress();
- if( progress != null ) {
+ if (progress != null) {
++storeCount;
assertTrue(progress.currentCompactedKVs > 0);
assertTrue(progress.getTotalCompactingKVs() > 0);
@@ -233,25 +232,24 @@ public class TestMajorCompaction {
// look at the second row
// Increment the least significant character so we get to next row.
- byte [] secondRowBytes = START_KEY_BYTES.clone();
+ byte[] secondRowBytes = START_KEY_BYTES.clone();
secondRowBytes[START_KEY_BYTES.length - 1]++;
// Always 3 versions if that is what max versions is.
result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).readVersions(100));
- LOG.debug("Row " + Bytes.toStringBinary(secondRowBytes) + " after " +
- "initial compaction: " + result);
- assertEquals("Invalid number of versions of row "
- + Bytes.toStringBinary(secondRowBytes) + ".", compactionThreshold,
- result.size());
+ LOG.debug(
+ "Row " + Bytes.toStringBinary(secondRowBytes) + " after " + "initial compaction: " + result);
+ assertEquals("Invalid number of versions of row " + Bytes.toStringBinary(secondRowBytes) + ".",
+ compactionThreshold, result.size());
// Now add deletes to memstore and then flush it.
// That will put us over
- // the compaction threshold of 3 store files. Compacting these store files
+ // the compaction threshold of 3 store files. Compacting these store files
// should result in a compacted store file that has no references to the
// deleted row.
LOG.debug("Adding deletes to memstore and flushing");
Delete delete = new Delete(secondRowBytes, System.currentTimeMillis());
- byte [][] famAndQf = {COLUMN_FAMILY, null};
+ byte[][] famAndQf = { COLUMN_FAMILY, null };
delete.addFamily(famAndQf[0]);
r.delete(delete);
@@ -264,7 +262,7 @@ public class TestMajorCompaction {
result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).readVersions(100));
assertTrue("Second row should have been deleted", result.isEmpty());
- // Add a bit of data and flush. Start adding at 'bbb'.
+ // Add a bit of data and flush. Start adding at 'bbb'.
createSmallerStoreFile(this.r);
r.flush(true);
// Assert that the second row is still deleted.
@@ -281,7 +279,7 @@ public class TestMajorCompaction {
// Make sure the store files do have some 'aaa' keys in them -- exactly 3.
// Also, that compacted store files do not have any secondRowBytes because
// they were deleted.
- verifyCounts(3,0);
+ verifyCounts(3, 0);
// Multiple versions allowed for an entry, so the delete isn't enough
// Lower TTL and expire to ensure that all our entries have been wiped
@@ -319,8 +317,8 @@ public class TestMajorCompaction {
assertEquals(2, s.getStorefilesCount());
// ensure that major compaction time is deterministic
- RatioBasedCompactionPolicy
- c = (RatioBasedCompactionPolicy)s.storeEngine.getCompactionPolicy();
+ RatioBasedCompactionPolicy c =
+ (RatioBasedCompactionPolicy) s.storeEngine.getCompactionPolicy();
Collection<HStoreFile> storeFiles = s.getStorefiles();
long mcTime = c.getNextMajorCompactTime(storeFiles);
for (int i = 0; i < 10; ++i) {
@@ -339,7 +337,7 @@ public class TestMajorCompaction {
assertEquals(1, s.getStorefilesCount());
} finally {
// reset the timed compaction settings
- conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000*60*60*24);
+ conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000 * 60 * 60 * 24);
conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.20F);
// run a major to reset the cache
createStoreFile(r);
@@ -351,33 +349,32 @@ public class TestMajorCompaction {
private void verifyCounts(int countRow1, int countRow2) throws Exception {
int count1 = 0;
int count2 = 0;
- for (HStoreFile f: r.getStore(COLUMN_FAMILY_TEXT).getStorefiles()) {
+ for (HStoreFile f : r.getStore(COLUMN_FAMILY_TEXT).getStorefiles()) {
HFileScanner scanner = f.getReader().getScanner(false, false);
scanner.seekTo();
do {
- byte [] row = CellUtil.cloneRow(scanner.getCell());
+ byte[] row = CellUtil.cloneRow(scanner.getCell());
if (Bytes.equals(row, STARTROW)) {
count1++;
- } else if(Bytes.equals(row, secondRowBytes)) {
+ } else if (Bytes.equals(row, secondRowBytes)) {
count2++;
}
- } while(scanner.next());
+ } while (scanner.next());
}
- assertEquals(countRow1,count1);
- assertEquals(countRow2,count2);
+ assertEquals(countRow1, count1);
+ assertEquals(countRow2, count2);
}
-
private int count() throws IOException {
int count = 0;
- for (HStoreFile f: r.getStore(COLUMN_FAMILY_TEXT).getStorefiles()) {
+ for (HStoreFile f : r.getStore(COLUMN_FAMILY_TEXT).getStorefiles()) {
HFileScanner scanner = f.getReader().getScanner(false, false);
if (!scanner.seekTo()) {
continue;
}
do {
count++;
- } while(scanner.next());
+ } while (scanner.next());
}
return count;
}
@@ -388,14 +385,13 @@ public class TestMajorCompaction {
private void createStoreFile(final HRegion region, String family) throws IOException {
Table loader = new RegionAsTable(region);
- HBaseTestCase.addContent(loader, family);
+ HTestConst.addContent(loader, family);
region.flush(true);
}
private void createSmallerStoreFile(final HRegion region) throws IOException {
Table loader = new RegionAsTable(region);
- HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY), Bytes.toBytes("" +
- "bbb"), null);
+ HTestConst.addContent(loader, Bytes.toString(COLUMN_FAMILY), Bytes.toBytes("" + "bbb"), null);
region.flush(true);
}
@@ -414,8 +410,7 @@ public class TestMajorCompaction {
CompactionRequestImpl request = store.requestCompaction().get().getRequest();
assertNotNull("Expected to receive a compaction request", request);
assertEquals(
- "System-requested major compaction should not occur if there are too many store files",
- false,
+ "System-requested major compaction should not occur if there are too many store files", false,
request.isMajor());
}
@@ -423,21 +418,19 @@ public class TestMajorCompaction {
* Test for HBASE-5920
*/
@Test
- public void testUserMajorCompactionRequest() throws IOException{
+ public void testUserMajorCompactionRequest() throws IOException {
HStore store = r.getStore(COLUMN_FAMILY);
createStoreFile(r);
for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
createStoreFile(r);
}
store.triggerMajorCompaction();
- CompactionRequestImpl request =
- store.requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null).get()
- .getRequest();
+ CompactionRequestImpl request = store
+ .requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null).get().getRequest();
assertNotNull("Expected to receive a compaction request", request);
assertEquals(
"User-requested major compaction should always occur, even if there are too many store files",
- true,
- request.isMajor());
+ true, request.isMajor());
}
/**
@@ -487,7 +480,7 @@ public class TestMajorCompaction {
}
private void testMajorCompactingWithDeletes(KeepDeletedCells keepDeletedCells)
- throws IOException {
+ throws IOException {
createStoreFile(r);
for (int i = 0; i < compactionThreshold; i++) {
createStoreFile(r);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java
index 2c3c010..9f916a5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java
@@ -22,13 +22,14 @@ import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
import static org.apache.hadoop.hbase.HBaseTestingUtility.fam2;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HTestConst;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
@@ -174,13 +175,13 @@ public class TestMinorCompaction {
throws Exception {
Table loader = new RegionAsTable(r);
for (int i = 0; i < compactionThreshold + 1; i++) {
- HBaseTestCase.addContent(loader, Bytes.toString(fam1), Bytes.toString(col1), firstRowBytes,
+ HTestConst.addContent(loader, Bytes.toString(fam1), Bytes.toString(col1), firstRowBytes,
thirdRowBytes, i);
- HBaseTestCase.addContent(loader, Bytes.toString(fam1), Bytes.toString(col2), firstRowBytes,
+ HTestConst.addContent(loader, Bytes.toString(fam1), Bytes.toString(col2), firstRowBytes,
thirdRowBytes, i);
- HBaseTestCase.addContent(loader, Bytes.toString(fam2), Bytes.toString(col1), firstRowBytes,
+ HTestConst.addContent(loader, Bytes.toString(fam2), Bytes.toString(col1), firstRowBytes,
thirdRowBytes, i);
- HBaseTestCase.addContent(loader, Bytes.toString(fam2), Bytes.toString(col2), firstRowBytes,
+ HTestConst.addContent(loader, Bytes.toString(fam2), Bytes.toString(col2), firstRowBytes,
thirdRowBytes, i);
r.flush(true);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
index 23d4db5..b5cfcb3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
@@ -32,10 +32,10 @@ import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HTestConst;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@@ -133,7 +133,7 @@ public class TestScanner {
byte [] stoprow = Bytes.toBytes("ccc");
try {
this.region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
- HBaseTestCase.addContent(this.region, HConstants.CATALOG_FAMILY);
+ HTestConst.addContent(this.region, HConstants.CATALOG_FAMILY);
List<Cell> results = new ArrayList<>();
// Do simple test of getting one row only first.
Scan scan = new Scan().withStartRow(Bytes.toBytes("abc"))
@@ -207,7 +207,7 @@ public class TestScanner {
public void testFilters() throws IOException {
try {
this.region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
- HBaseTestCase.addContent(this.region, HConstants.CATALOG_FAMILY);
+ HTestConst.addContent(this.region, HConstants.CATALOG_FAMILY);
byte [] prefix = Bytes.toBytes("ab");
Filter newFilter = new PrefixFilter(prefix);
Scan scan = new Scan();
@@ -233,7 +233,7 @@ public class TestScanner {
public void testRaceBetweenClientAndTimeout() throws Exception {
try {
this.region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
- HBaseTestCase.addContent(this.region, HConstants.CATALOG_FAMILY);
+ HTestConst.addContent(this.region, HConstants.CATALOG_FAMILY);
Scan scan = new Scan();
InternalScanner s = region.getScanner(scan);
List<Cell> results = new ArrayList<>();
@@ -465,7 +465,7 @@ public class TestScanner {
Table hri = new RegionAsTable(region);
try {
LOG.info("Added: " +
- HBaseTestCase.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
+ HTestConst.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
int count = count(hri, -1, false);
assertEquals(count, count(hri, 100, false)); // do a sync flush.
@@ -487,7 +487,7 @@ public class TestScanner {
Table hri = new RegionAsTable(region);
try {
LOG.info("Added: " +
- HBaseTestCase.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
+ HTestConst.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
int count = count(hri, -1, false);
assertEquals(count, count(hri, 100, true)); // do a true concurrent background thread flush
@@ -513,9 +513,9 @@ public class TestScanner {
Table hri = new RegionAsTable(region);
try {
- HBaseTestCase.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
+ HTestConst.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
firstRowBytes, secondRowBytes);
- HBaseTestCase.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1),
+ HTestConst.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1),
firstRowBytes, secondRowBytes);
Delete dc = new Delete(firstRowBytes);
@@ -524,9 +524,9 @@ public class TestScanner {
region.delete(dc);
region.flush(true);
- HBaseTestCase.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
+ HTestConst.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
secondRowBytes, thirdRowBytes);
- HBaseTestCase.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1),
+ HTestConst.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1),
secondRowBytes, thirdRowBytes);
region.flush(true);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java
index bc1ea58..fca371f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java
@@ -17,61 +17,87 @@
*/
package org.apache.hadoop.hbase.regionserver;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
-import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-@Category({RegionServerTests.class, SmallTests.class})
-public class TestWideScanner extends HBaseTestCase {
+@Category({ RegionServerTests.class, SmallTests.class })
+public class TestWideScanner {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestWideScanner.class);
+ HBaseClassTestRule.forClass(TestWideScanner.class);
+
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static final Logger LOG = LoggerFactory.getLogger(TestWideScanner.class);
- static final byte[] A = Bytes.toBytes("A");
- static final byte[] B = Bytes.toBytes("B");
- static final byte[] C = Bytes.toBytes("C");
- static byte[][] COLUMNS = { A, B, C };
- static final Random rng = new Random();
- static final TableDescriptorBuilder.ModifyableTableDescriptor TESTTABLEDESC =
- new TableDescriptorBuilder.ModifyableTableDescriptor(TableName.valueOf("testwidescan"));
+ private static final byte[] A = Bytes.toBytes("A");
+ private static final byte[] B = Bytes.toBytes("B");
+ private static final byte[] C = Bytes.toBytes("C");
+ private static byte[][] COLUMNS = { A, B, C };
+
+ private static final TableDescriptor TESTTABLEDESC;
static {
+ TableDescriptorBuilder builder =
+ TableDescriptorBuilder.newBuilder(TableName.valueOf("testwidescan"));
for (byte[] cfName : new byte[][] { A, B, C }) {
- TESTTABLEDESC.setColumnFamily(
- new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(cfName)
- // Keep versions to help debugging.
- .setMaxVersions(100)
- .setBlocksize(8 * 1024)
- );
+ // Keep versions to help debugging.
+ builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfName).setMaxVersions(100)
+ .setBlocksize(8 * 1024).build());
}
+ TESTTABLEDESC = builder.build();
}
/** HRegionInfo for root region */
- HRegion r;
+ private static HRegion REGION;
+
+ @BeforeClass
+ public static void setUp() throws IOException {
+ Path testDir = UTIL.getDataTestDir();
+ RegionInfo hri = RegionInfoBuilder.newBuilder(TESTTABLEDESC.getTableName()).build();
+ REGION =
+ HBaseTestingUtility.createRegionAndWAL(hri, testDir, UTIL.getConfiguration(), TESTTABLEDESC);
+ }
+
+ @AfterClass
+ public static void tearDown() throws IOException {
+ if (REGION != null) {
+ HBaseTestingUtility.closeRegionAndWAL(REGION);
+ REGION = null;
+ }
+ UTIL.cleanupTestDir();
+ }
private int addWideContent(HRegion region) throws IOException {
int count = 0;
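The TestWideScanner overhaul above swaps the mutable ModifyableTableDescriptor for the immutable builder chain and hoists region setup into @BeforeClass/@AfterClass so one region serves the whole class. The descriptor-building half in isolation, using the same HBase 2.x builder calls the diff shows:

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.util.Bytes;

  public class DescriptorBuilderSketch {
    public static void main(String[] args) {
      TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("testwidescan"));
      for (String cf : new String[] { "A", "B", "C" }) {
        // Each family is built immutably, then attached to the table builder.
        builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf))
          .setMaxVersions(100).setBlocksize(8 * 1024).build());
      }
      TableDescriptor td = builder.build();  // immutable from here on
      System.out.println(td.getTableName() + " families=" + td.getColumnFamilies().length);
    }
  }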
@@ -85,7 +111,7 @@ public class TestWideScanner extends HBaseTestCase {
Put put = new Put(row);
put.setDurability(Durability.SKIP_WAL);
long ts1 = ++ts;
- put.addColumn(COLUMNS[rng.nextInt(COLUMNS.length)], b, ts1, b);
+ put.addColumn(COLUMNS[ThreadLocalRandom.current().nextInt(COLUMNS.length)], b, ts1, b);
region.put(put);
count++;
}
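Replacing the static shared Random with ThreadLocalRandom.current() drops a piece of shared mutable state and any cross-thread contention; ThreadLocalRandom cannot be seeded, which is acceptable for a test that only needs an arbitrary column family. The swap in miniature:

  import java.util.concurrent.ThreadLocalRandom;

  public class ThreadLocalRandomSketch {
    public static void main(String[] args) {
      String[] columns = { "A", "B", "C" };
      // current() returns the calling thread's generator; no shared lock, no shared state.
      String pick = columns[ThreadLocalRandom.current().nextInt(columns.length)];
      System.out.println("picked column family " + pick);
    }
  }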
@@ -97,17 +123,15 @@ public class TestWideScanner extends HBaseTestCase {
@Test
public void testWideScanBatching() throws IOException {
final int batch = 256;
- try {
- this.r = createNewHRegion(TESTTABLEDESC, null, null);
- int inserted = addWideContent(this.r);
- List<Cell> results = new ArrayList<>();
- Scan scan = new Scan();
- scan.addFamily(A);
- scan.addFamily(B);
- scan.addFamily(C);
- scan.readVersions(100);
- scan.setBatch(batch);
- InternalScanner s = r.getScanner(scan);
+ int inserted = addWideContent(REGION);
+ List<Cell> results = new ArrayList<>();
+ Scan scan = new Scan();
+ scan.addFamily(A);
+ scan.addFamily(B);
+ scan.addFamily(C);
+ scan.readVersions(100);
+ scan.setBatch(batch);
+ try (InternalScanner s = REGION.getScanner(scan)) {
int total = 0;
int i = 0;
boolean more;
@@ -124,7 +148,7 @@ public class TestWideScanner extends HBaseTestCase {
if (results.size() > 0) {
// assert that all results are from the same row
byte[] row = CellUtil.cloneRow(results.get(0));
- for (Cell kv: results) {
+ for (Cell kv : results) {
assertTrue(Bytes.equals(row, CellUtil.cloneRow(kv)));
}
}
@@ -133,22 +157,16 @@ public class TestWideScanner extends HBaseTestCase {
// trigger ChangedReadersObservers
Iterator<KeyValueScanner> scanners =
- ((HRegion.RegionScannerImpl)s).storeHeap.getHeap().iterator();
+ ((HRegion.RegionScannerImpl) s).storeHeap.getHeap().iterator();
while (scanners.hasNext()) {
- StoreScanner ss = (StoreScanner)scanners.next();
- ss.updateReaders(Collections.EMPTY_LIST, Collections.EMPTY_LIST);
+ StoreScanner ss = (StoreScanner) scanners.next();
+ ss.updateReaders(Collections.emptyList(), Collections.emptyList());
}
} while (more);
// assert that the scanner returned all values
LOG.info("inserted " + inserted + ", scanned " + total);
assertEquals(total, inserted);
-
- s.close();
- } finally {
- HBaseTestingUtility.closeRegionAndWAL(this.r);
}
}
-
}
-