You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by ap...@apache.org on 2018/08/16 22:20:07 UTC
[1/8] hbase git commit: HBASE-20940 HStore.cansplit should not allow
split to happen if it has references (Vishal Khandelwal)
Repository: hbase
Updated Branches:
refs/heads/branch-1 5520fa1ab -> 971d48406
refs/heads/branch-1.3 fe55991b8 -> efafa4108
refs/heads/branch-1.4 472a13aaf -> 9e5a1cb2b
refs/heads/branch-2 75939775a -> 1fa67725c
refs/heads/branch-2.0 66489b504 -> 7564efaf9
refs/heads/branch-2.1 145c92f3d -> b49941012
refs/heads/master 2cfe1e8ae -> 1dbd6fa99
HBASE-20940 HStore.cansplit should not allow split to happen if it has references (Vishal Khandelwal)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/252f1bc5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/252f1bc5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/252f1bc5
Branch: refs/heads/branch-2.0
Commit: 252f1bc5de68ced75b062d7155a3e7b464e868a0
Parents: 66489b5
Author: Andrew Purtell <ap...@apache.org>
Authored: Tue Aug 7 14:54:29 2018 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Wed Aug 15 17:52:41 2018 -0700
----------------------------------------------------------------------
.../hadoop/hbase/regionserver/HStore.java | 12 +-
.../client/TestAsyncTableGetMultiThreaded.java | 28 +++-
.../hbase/io/encoding/TestChangingEncoding.java | 4 +
.../TestEndToEndSplitTransaction.java | 136 +++++++++++++++++--
.../TestSplitTransactionOnCluster.java | 37 +++--
5 files changed, 190 insertions(+), 27 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/252f1bc5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 035496f..3943de1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -1632,7 +1632,17 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
@Override
public boolean hasReferences() {
- return StoreUtils.hasReferences(this.storeEngine.getStoreFileManager().getStorefiles());
+ List<HStoreFile> reloadedStoreFiles = null;
+ try {
+ // Reloading the store files from the file system due to HBASE-20940, as a split can happen on a
+ // region which has references
+ reloadedStoreFiles = loadStoreFiles();
+ return StoreUtils.hasReferences(reloadedStoreFiles);
+ } catch (IOException ioe) {
+ LOG.error("Error trying to determine if store has references, assuming references exists",
+ ioe);
+ return true;
+ }
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/252f1bc5/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
index 7632716..8a2dfcc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
@@ -21,7 +21,6 @@ import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_META_OPERATION_TIM
import static org.apache.hadoop.hbase.master.LoadBalancer.TABLES_ON_MASTER;
import static org.junit.Assert.assertEquals;
-import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -35,18 +34,21 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
+
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.io.ByteBufferPool;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -124,7 +126,7 @@ public class TestAsyncTableGetMultiThreaded {
}
@Test
- public void test() throws IOException, InterruptedException, ExecutionException {
+ public void test() throws Exception {
int numThreads = 20;
AtomicBoolean stop = new AtomicBoolean(false);
ExecutorService executor =
@@ -137,9 +139,31 @@ public class TestAsyncTableGetMultiThreaded {
Collections.shuffle(Arrays.asList(SPLIT_KEYS), new Random(123));
Admin admin = TEST_UTIL.getAdmin();
for (byte[] splitPoint : SPLIT_KEYS) {
+ int oldRegionCount = admin.getRegions(TABLE_NAME).size();
admin.split(TABLE_NAME, splitPoint);
+ TEST_UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {
+ @Override
+ public boolean evaluate() throws Exception {
+ return TEST_UTIL.getMiniHBaseCluster().getRegions(TABLE_NAME).size() > oldRegionCount;
+ }
+
+ @Override
+ public String explainFailure() throws Exception {
+ return "Split has not finished yet";
+ }
+ });
+
for (HRegion region : TEST_UTIL.getHBaseCluster().getRegions(TABLE_NAME)) {
region.compact(true);
+
+ // Waiting for the compaction to complete and for references to be cleaned up
+ RetryCounter retrier = new RetryCounter(30, 1, TimeUnit.SECONDS);
+ while (CompactionState.NONE != admin
+ .getCompactionStateForRegion(region.getRegionInfo().getRegionName())
+ && retrier.shouldRetry()) {
+ retrier.sleepUntilNextRetry();
+ }
+ region.getStores().get(0).closeAndArchiveCompactedFiles();
}
Thread.sleep(5000);
admin.balance(true);
http://git-wip-us.apache.org/repos/asf/hbase/blob/252f1bc5/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java
index 1937d80..38313c4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java
@@ -107,6 +107,10 @@ public class TestChangingEncoding {
public static void setUpBeforeClass() throws Exception {
// Use a small flush size to create more HFiles.
conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024);
+ // Disabling split to make sure a split does not cause modify column to wait, which sometimes
+ // times out the test
+ conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
+ "org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy");
// ((Log4JLogger)RpcServerImplementation.LOG).getLogger().setLevel(Level.TRACE);
// ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.TRACE);
TEST_UTIL.startMiniCluster();
http://git-wip-us.apache.org/repos/asf/hbase/blob/252f1bc5/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
index b0302f6..85e9d30 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
@@ -24,10 +24,13 @@ import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
+import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
+
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ChoreService;
@@ -41,6 +44,8 @@ import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
@@ -49,13 +54,19 @@ import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;
+import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.StoppableImplementation;
import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hbase.thirdparty.com.google.common.collect.Iterators;
+import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
import org.junit.AfterClass;
+import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
@@ -65,8 +76,6 @@ import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.hbase.thirdparty.com.google.common.collect.Iterators;
-
@Category(LargeTests.class)
public class TestEndToEndSplitTransaction {
@@ -92,6 +101,78 @@ public class TestEndToEndSplitTransaction {
TEST_UTIL.shutdownMiniCluster();
}
+
+ /*
+ * This is the test for HBASE-20940. This test splits the region and tries to open a reference
+ * over a store file. Once a store file has any reference, it makes sure that the region can't
+ * be split
+ * @throws Exception
+ */
+ @Test
+ public void testCanSplitJustAfterASplit() throws Exception {
+ LOG.info("Starting testCanSplitJustAfterASplit");
+ byte[] fam = Bytes.toBytes("cf_split");
+
+ TableName tableName = TableName.valueOf("CanSplitTable");
+ Table source = TEST_UTIL.getConnection().getTable(tableName);
+ Admin admin = TEST_UTIL.getAdmin();
+ Map<String, StoreFileReader> scanner = Maps.newHashMap();
+
+ try {
+ TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
+ .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam)).build();
+
+ admin.createTable(htd);
+ TEST_UTIL.loadTable(source, fam);
+ List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
+ regions.get(0).forceSplit(null);
+ admin.split(tableName);
+
+ while (regions.size() <= 1) {
+ regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
+ regions.stream()
+ .forEach(r -> r.getStores().get(0).getStorefiles().stream()
+ .filter(
+ s -> s.isReference() && !scanner.containsKey(r.getRegionInfo().getEncodedName()))
+ .forEach(sf -> {
+ StoreFileReader reader = ((HStoreFile) sf).getReader();
+ reader.getStoreFileScanner(true, false, false, 0, 0, false);
+ scanner.put(r.getRegionInfo().getEncodedName(), reader);
+ LOG.info("Got reference to file = " + sf.getPath() + ",for region = "
+ + r.getRegionInfo().getEncodedName());
+ }));
+ }
+
+ Assert.assertTrue("Regions did not split properly", regions.size() > 1);
+ Assert.assertTrue("Could not get reference any of the store file", scanner.size() > 1);
+
+ RetryCounter retrier = new RetryCounter(30, 1, TimeUnit.SECONDS);
+ while (CompactionState.NONE != admin.getCompactionState(tableName) && retrier.shouldRetry()) {
+ retrier.sleepUntilNextRetry();
+ }
+
+ Assert.assertEquals("Compaction did not complete in 30 secs", CompactionState.NONE,
+ admin.getCompactionState(tableName));
+
+ regions.stream()
+ .filter(region -> scanner.containsKey(region.getRegionInfo().getEncodedName()))
+ .forEach(r -> Assert.assertTrue("Contains an open file reference which can be split",
+ !r.getStores().get(0).canSplit()));
+ } finally {
+ scanner.values().stream().forEach(s -> {
+ try {
+ s.close(true);
+ } catch (IOException ioe) {
+ LOG.error("Failed while closing store file", ioe);
+ }
+ });
+ scanner.clear();
+ if (source != null) {
+ source.close();
+ }
+ TEST_UTIL.deleteTableIfAny(tableName);
+ }
+ }
+
/**
* Tests that the client sees meta table changes as atomic during splits
*/
@@ -151,18 +232,17 @@ public class TestEndToEndSplitTransaction {
public void run() {
try {
Random random = new Random();
- for (int i= 0; i< 5; i++) {
- List<RegionInfo> regions =
- MetaTableAccessor.getTableRegions(connection, tableName, true);
+ for (int i = 0; i < 5; i++) {
+ List<RegionInfo> regions = MetaTableAccessor.getTableRegions(connection, tableName, true);
if (regions.isEmpty()) {
continue;
}
int regionIndex = random.nextInt(regions.size());
- //pick a random region and split it into two
+ // pick a random region and split it into two
RegionInfo region = Iterators.get(regions.iterator(), regionIndex);
- //pick the mid split point
+ // pick the mid split point
int start = 0, end = Integer.MAX_VALUE;
if (region.getStartKey().length > 0) {
start = Bytes.toInt(region.getStartKey());
@@ -173,7 +253,7 @@ public class TestEndToEndSplitTransaction {
int mid = start + ((end - start) / 2);
byte[] splitPoint = Bytes.toBytes(mid);
- //put some rows to the regions
+ // put some rows to the regions
addData(start);
addData(mid);
@@ -183,11 +263,11 @@ public class TestEndToEndSplitTransaction {
log("Initiating region split for:" + region.getRegionNameAsString());
try {
admin.splitRegion(region.getRegionName(), splitPoint);
- //wait until the split is complete
+ // wait until the split is complete
blockUntilRegionSplit(CONF, 50000, region.getRegionName(), true);
} catch (NotServingRegionException ex) {
- //ignore
+ // ignore
}
}
} catch (Throwable ex) {
@@ -226,9 +306,11 @@ public class TestEndToEndSplitTransaction {
/** verify region boundaries obtained from MetaScanner */
void verifyRegionsUsingMetaTableAccessor() throws Exception {
List<RegionInfo> regionList = MetaTableAccessor.getTableRegions(connection, tableName, true);
- verifyTableRegions(regionList.stream().collect(Collectors.toCollection(() -> new TreeSet<>(RegionInfo.COMPARATOR))));
+ verifyTableRegions(regionList.stream()
+ .collect(Collectors.toCollection(() -> new TreeSet<>(RegionInfo.COMPARATOR))));
regionList = MetaTableAccessor.getAllRegions(connection, true);
- verifyTableRegions(regionList.stream().collect(Collectors.toCollection(() -> new TreeSet<>(RegionInfo.COMPARATOR))));
+ verifyTableRegions(regionList.stream()
+ .collect(Collectors.toCollection(() -> new TreeSet<>(RegionInfo.COMPARATOR))));
}
/** verify region boundaries obtained from HTable.getStartEndKeys() */
@@ -343,7 +425,9 @@ public class TestEndToEndSplitTransaction {
}
}
- /** Blocks until the region split is complete in hbase:meta and region server opens the daughters */
+ /**
+ * Blocks until the region split is complete in hbase:meta and region server opens the daughters
+ */
public static void blockUntilRegionSplit(Configuration conf, long timeout,
final byte[] regionName, boolean waitForDaughters)
throws IOException, InterruptedException {
@@ -389,10 +473,32 @@ public class TestEndToEndSplitTransaction {
rem = timeout - (System.currentTimeMillis() - start);
blockUntilRegionIsOpened(conf, rem, daughterB);
+
+ // Compacting the new region to make sure references can be cleaned up
+ compactAndBlockUntilDone(TEST_UTIL.getAdmin(),
+ TEST_UTIL.getMiniHBaseCluster().getRegionServer(0), daughterA.getRegionName());
+ compactAndBlockUntilDone(TEST_UTIL.getAdmin(),
+ TEST_UTIL.getMiniHBaseCluster().getRegionServer(0), daughterB.getRegionName());
+
+ removeCompactedFiles(conn, timeout, daughterA);
+ removeCompactedFiles(conn, timeout, daughterB);
}
}
}
+ public static void removeCompactedFiles(Connection conn, long timeout, RegionInfo hri)
+ throws IOException, InterruptedException {
+ log("remove compacted files for : " + hri.getRegionNameAsString());
+ List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(hri.getTable());
+ regions.stream().forEach(r -> {
+ try {
+ r.getStores().get(0).closeAndArchiveCompactedFiles();
+ } catch (IOException ioe) {
+ LOG.error("failed in removing compacted file", ioe);
+ }
+ });
+ }
+
public static void blockUntilRegionIsInMeta(Connection conn, long timeout, RegionInfo hri)
throws IOException, InterruptedException {
log("blocking until region is in META: " + hri.getRegionNameAsString());
@@ -415,7 +521,9 @@ public class TestEndToEndSplitTransaction {
Table table = conn.getTable(hri.getTable())) {
byte[] row = hri.getStartKey();
// Check for null/empty row. If we find one, use a key that is likely to be in first region.
- if (row == null || row.length <= 0) row = new byte[] { '0' };
+ if (row == null || row.length <= 0) {
+ row = new byte[] { '0' };
+ }
Get get = new Get(row);
while (System.currentTimeMillis() - start < timeout) {
try {
http://git-wip-us.apache.org/repos/asf/hbase/blob/252f1bc5/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index 95e0112..eb162de 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -31,7 +31,9 @@ import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -50,6 +52,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.DoNotRetryRegionException;
@@ -78,6 +81,10 @@ import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
@@ -85,7 +92,10 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HBaseFsck;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
+import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
+import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.junit.After;
@@ -101,14 +111,6 @@ import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
-import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
-
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
-
/**
* The below tests are testing split region against a running cluster
*/
@@ -386,11 +388,18 @@ public class TestSplitTransactionOnCluster {
// Compact first to ensure we have cleaned up references -- else the split
// will fail.
this.admin.compactRegion(daughter.getRegionName());
+ RetryCounter retrier = new RetryCounter(30, 1, TimeUnit.SECONDS);
+ while (CompactionState.NONE != admin.getCompactionStateForRegion(daughter.getRegionName())
+ && retrier.shouldRetry()) {
+ retrier.sleepUntilNextRetry();
+ }
daughters = cluster.getRegions(tableName);
HRegion daughterRegion = null;
- for (HRegion r: daughters) {
+ for (HRegion r : daughters) {
if (RegionInfo.COMPARATOR.compare(r.getRegionInfo(), daughter) == 0) {
daughterRegion = r;
+ // Archiving the compacted references file
+ r.getStores().get(0).closeAndArchiveCompactedFiles();
LOG.info("Found matching HRI: " + daughterRegion);
break;
}
@@ -533,11 +542,19 @@ public class TestSplitTransactionOnCluster {
// Call split.
this.admin.splitRegion(hri.getRegionName());
List<HRegion> daughters = checkAndGetDaughters(tableName);
+
// Before cleanup, get a new master.
HMaster master = abortAndWaitForMaster();
// Now call compact on the daughters and clean up any references.
- for (HRegion daughter: daughters) {
+ for (HRegion daughter : daughters) {
daughter.compact(true);
+ RetryCounter retrier = new RetryCounter(30, 1, TimeUnit.SECONDS);
+ while (CompactionState.NONE != admin
+ .getCompactionStateForRegion(daughter.getRegionInfo().getRegionName())
+ && retrier.shouldRetry()) {
+ retrier.sleepUntilNextRetry();
+ }
+ daughter.getStores().get(0).closeAndArchiveCompactedFiles();
assertFalse(daughter.hasReferences());
}
// BUT calling compact on the daughters is not enough. The CatalogJanitor looks
[6/8] hbase git commit: HBASE-21047 Object creation of
StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal
Khandelwal)
Posted by ap...@apache.org.
HBASE-21047 Object creation of StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal Khandelwal)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/971d4840
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/971d4840
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/971d4840
Branch: refs/heads/branch-1
Commit: 971d48406e4d26dcd4a004fc5543d01362acd3d9
Parents: 5520fa1
Author: Andrew Purtell <ap...@apache.org>
Authored: Thu Aug 16 11:42:15 2018 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Thu Aug 16 11:43:24 2018 -0700
----------------------------------------------------------------------
.../hadoop/hbase/regionserver/StoreFile.java | 10 +++++++--
.../hbase/regionserver/StoreFileScanner.java | 1 +
.../hbase/regionserver/TestStoreFile.java | 23 ++++++++++++++++++++
3 files changed, 32 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/971d4840/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 1e74911..bf20c04 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -1219,13 +1219,19 @@ public class StoreFile {
*/
public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread,
boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) {
- // Increment the ref count
- refCount.incrementAndGet();
return new StoreFileScanner(this, getScanner(cacheBlocks, pread, isCompaction), !isCompaction,
reader.hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn);
}
/**
+ * Increment the ref count associated with the reader whenever a scanner associated with the
+ * reader is opened
+ */
+ void incrementRefCount() {
+ refCount.incrementAndGet();
+ }
+
+ /**
* Decrement the ref count associated with the reader when ever a scanner associated
* with the reader is closed
*/
http://git-wip-us.apache.org/repos/asf/hbase/blob/971d4840/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index b3f7fa6..7343eaf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -94,6 +94,7 @@ public class StoreFileScanner implements KeyValueScanner {
this.hasMVCCInfo = hasMVCC;
this.scannerOrder = scannerOrder;
this.canOptimizeForNonNullColumn = canOptimizeForNonNullColumn;
+ this.reader.incrementRefCount();
}
boolean isPrimaryReplica() {
http://git-wip-us.apache.org/repos/asf/hbase/blob/971d4840/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
index 9c9b4b2..6bd53c6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
@@ -235,6 +235,29 @@ public class TestStoreFile extends HBaseTestCase {
}
@Test
+ public void testStoreFileReference() throws Exception {
+ Path f = new Path(ROOT_DIR, getName());
+ HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
+ // Make a store file and write data to it.
+ StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs).withFilePath(f)
+ .withFileContext(meta).build();
+
+ writeStoreFile(writer);
+ writer.close();
+
+ // Creates a reader for StoreFile
+ StoreFile.Reader reader = new StoreFile.Reader(this.fs, f, cacheConf, conf);
+ StoreFileScanner scanner =
+ new StoreFileScanner(reader, mock(HFileScanner.class), false, false, 0, 0, false);
+
+ // Verify after instantiating scanner refCount is increased
+ assertTrue(scanner.getReader().isReferencedInReads());
+ scanner.close();
+ // Verify after closing scanner refCount is decreased
+ assertFalse(scanner.getReader().isReferencedInReads());
+ }
+
+ @Test
public void testEmptyStoreFileRestrictKeyRanges() throws Exception {
StoreFile.Reader reader = mock(StoreFile.Reader.class);
Store store = mock(Store.class);
[8/8] hbase git commit: HBASE-21047 Object creation of
StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal
Khandelwal)
Posted by ap...@apache.org.
HBASE-21047 Object creation of StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal Khandelwal)
Conflicts:
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/efafa410
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/efafa410
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/efafa410
Branch: refs/heads/branch-1.3
Commit: efafa4108f75819c27e999a6b9e00fe9a37879ee
Parents: fe55991
Author: Andrew Purtell <ap...@apache.org>
Authored: Thu Aug 16 11:42:15 2018 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Thu Aug 16 12:32:08 2018 -0700
----------------------------------------------------------------------
.../hadoop/hbase/regionserver/StoreFile.java | 18 +++++++++-------
.../hbase/regionserver/StoreFileScanner.java | 1 +
.../hbase/regionserver/TestStoreFile.java | 22 ++++++++++++++++++++
3 files changed, 34 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/efafa410/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 7a9dbe1..013573d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -1213,14 +1213,18 @@ public class StoreFile {
* @param isCompaction is scanner being used for compaction?
* @return a scanner
*/
- public StoreFileScanner getStoreFileScanner(boolean cacheBlocks,
- boolean pread,
- boolean isCompaction, long readPt) {
- // Increment the ref count
+ public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread,
+ boolean isCompaction, long readPt) {
+ return new StoreFileScanner(this, getScanner(cacheBlocks, pread, isCompaction),
+ !isCompaction, reader.hasMVCCInfo(), readPt);
+ }
+
+ /**
+ * Increment the ref count associated with the reader whenever a scanner associated with the
+ * reader is opened
+ */
+ void incrementRefCount() {
refCount.incrementAndGet();
- return new StoreFileScanner(this,
- getScanner(cacheBlocks, pread, isCompaction),
- !isCompaction, reader.hasMVCCInfo(), readPt);
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/efafa410/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index 3a50ea9..d94e1d5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -80,6 +80,7 @@ public class StoreFileScanner implements KeyValueScanner {
this.hfs = hfs;
this.enforceMVCC = useMVCC;
this.hasMVCCInfo = hasMVCC;
+ this.reader.incrementRefCount();
}
boolean isPrimaryReplica() {
http://git-wip-us.apache.org/repos/asf/hbase/blob/efafa410/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
index da4593b..467f4d4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
@@ -233,6 +233,28 @@ public class TestStoreFile extends HBaseTestCase {
assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
+ public void testStoreFileReference() throws Exception {
+ Path f = new Path(ROOT_DIR, getName());
+ HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
+ // Make a store file and write data to it.
+ StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs).withFilePath(f)
+ .withFileContext(meta).build();
+
+ writeStoreFile(writer);
+ writer.close();
+
+ // Creates a reader for StoreFile
+ StoreFile.Reader reader = new StoreFile.Reader(this.fs, f, cacheConf, conf);
+ StoreFileScanner scanner =
+ new StoreFileScanner(reader, mock(HFileScanner.class), false, false, 0);
+
+ // Verify after instantiating scanner refCount is increased
+ assertTrue(scanner.getReader().isReferencedInReads());
+ scanner.close();
+ // Verify after closing scanner refCount is decreased
+ assertFalse(scanner.getReader().isReferencedInReads());
+ }
+
public void testEmptyStoreFileRestrictKeyRanges() throws Exception {
StoreFile.Reader reader = mock(StoreFile.Reader.class);
Store store = mock(Store.class);
[3/8] hbase git commit: HBASE-21047 Object creation of
StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal
Khandelwal)
Posted by ap...@apache.org.
HBASE-21047 Object creation of StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal Khandelwal)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1fa67725
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1fa67725
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1fa67725
Branch: refs/heads/branch-2
Commit: 1fa67725c5406f348dbb2266a8e8324ebd5a20b9
Parents: 7593977
Author: Andrew Purtell <ap...@apache.org>
Authored: Thu Aug 16 11:42:15 2018 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Thu Aug 16 11:42:49 2018 -0700
----------------------------------------------------------------------
.../hbase/regionserver/StoreFileReader.java | 10 ++++-
.../hbase/regionserver/StoreFileScanner.java | 1 +
.../hbase/regionserver/TestHStoreFile.java | 40 +++++++++++++++++---
3 files changed, 44 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/1fa67725/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index db7b4f9..aeff1f8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -148,13 +148,19 @@ public class StoreFileReader {
*/
public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread,
boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) {
- // Increment the ref count
- refCount.incrementAndGet();
return new StoreFileScanner(this, getScanner(cacheBlocks, pread, isCompaction),
!isCompaction, reader.hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn);
}
/**
+ * Indicate that the scanner has started reading with this reader. We need to increment the ref
+ * count so reader is not close until some object is holding the lock
+ */
+ void incrementRefCount() {
+ refCount.incrementAndGet();
+ }
+
+ /**
* Indicate that the scanner has finished reading with this reader. We need to decrement the ref
* count, and also, if this is not the common pread reader, we should close it.
*/
http://git-wip-us.apache.org/repos/asf/hbase/blob/1fa67725/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index 80d0ad7..b5b853a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -94,6 +94,7 @@ public class StoreFileScanner implements KeyValueScanner {
this.hasMVCCInfo = hasMVCC;
this.scannerOrder = scannerOrder;
this.canOptimizeForNonNullColumn = canOptimizeForNonNullColumn;
+ this.reader.incrementRefCount();
}
boolean isPrimaryReplica() {
http://git-wip-us.apache.org/repos/asf/hbase/blob/1fa67725/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
index 72da1a3..5cd0403 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
@@ -31,6 +31,7 @@ import java.util.Map;
import java.util.OptionalLong;
import java.util.TreeSet;
import java.util.concurrent.atomic.AtomicInteger;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -39,7 +40,6 @@ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
@@ -48,6 +48,8 @@ import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -65,6 +67,9 @@ import org.apache.hadoop.hbase.util.BloomFilterFactory;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
+import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
@@ -74,10 +79,6 @@ import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-
/**
* Test HStoreFile
*/
@@ -213,6 +214,35 @@ public class TestHStoreFile extends HBaseTestCase {
}
@Test
+ public void testStoreFileReference() throws Exception {
+ final RegionInfo hri =
+ RegionInfoBuilder.newBuilder(TableName.valueOf("testStoreFileReference")).build();
+ HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
+ new Path(testDir, hri.getTable().getNameAsString()), hri);
+ HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
+
+ // Make a store file and write data to it.
+ StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
+ .withFilePath(regionFs.createTempName()).withFileContext(meta).build();
+ writeStoreFile(writer);
+ Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
+ writer.close();
+
+ HStoreFile file = new HStoreFile(this.fs, hsfPath, conf, cacheConf, BloomType.NONE, true);
+ file.initReader();
+ StoreFileReader r = file.getReader();
+ assertNotNull(r);
+ StoreFileScanner scanner =
+ new StoreFileScanner(r, mock(HFileScanner.class), false, false, 0, 0, false);
+
+ // Verify after instantiating scanner refCount is increased
+ assertTrue("Verify file is being referenced", file.isReferencedInReads());
+ scanner.close();
+ // Verify after closing scanner refCount is decreased
+ assertFalse("Verify file is not being referenced", file.isReferencedInReads());
+ }
+
+ @Test
public void testEmptyStoreFileRestrictKeyRanges() throws Exception {
StoreFileReader reader = mock(StoreFileReader.class);
HStore store = mock(HStore.class);
[4/8] hbase git commit: HBASE-21047 Object creation of
StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal
Khandelwal)
Posted by ap...@apache.org.
HBASE-21047 Object creation of StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal Khandelwal)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b4994101
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b4994101
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b4994101
Branch: refs/heads/branch-2.1
Commit: b49941012a2f414d44ee96cf35e0e8d2eef38b98
Parents: 145c92f
Author: Andrew Purtell <ap...@apache.org>
Authored: Thu Aug 16 11:42:15 2018 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Thu Aug 16 11:42:54 2018 -0700
----------------------------------------------------------------------
.../hbase/regionserver/StoreFileReader.java | 10 ++++-
.../hbase/regionserver/StoreFileScanner.java | 1 +
.../hbase/regionserver/TestHStoreFile.java | 40 +++++++++++++++++---
3 files changed, 44 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/b4994101/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index db7b4f9..aeff1f8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -148,13 +148,19 @@ public class StoreFileReader {
*/
public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread,
boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) {
- // Increment the ref count
- refCount.incrementAndGet();
return new StoreFileScanner(this, getScanner(cacheBlocks, pread, isCompaction),
!isCompaction, reader.hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn);
}
/**
+ * Indicate that the scanner has started reading with this reader. We need to increment the ref
+ * count so reader is not close until some object is holding the lock
+ */
+ void incrementRefCount() {
+ refCount.incrementAndGet();
+ }
+
+ /**
* Indicate that the scanner has finished reading with this reader. We need to decrement the ref
* count, and also, if this is not the common pread reader, we should close it.
*/
http://git-wip-us.apache.org/repos/asf/hbase/blob/b4994101/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index 80d0ad7..b5b853a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -94,6 +94,7 @@ public class StoreFileScanner implements KeyValueScanner {
this.hasMVCCInfo = hasMVCC;
this.scannerOrder = scannerOrder;
this.canOptimizeForNonNullColumn = canOptimizeForNonNullColumn;
+ this.reader.incrementRefCount();
}
boolean isPrimaryReplica() {
http://git-wip-us.apache.org/repos/asf/hbase/blob/b4994101/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
index 72da1a3..5cd0403 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
@@ -31,6 +31,7 @@ import java.util.Map;
import java.util.OptionalLong;
import java.util.TreeSet;
import java.util.concurrent.atomic.AtomicInteger;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -39,7 +40,6 @@ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
@@ -48,6 +48,8 @@ import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -65,6 +67,9 @@ import org.apache.hadoop.hbase.util.BloomFilterFactory;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
+import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
@@ -74,10 +79,6 @@ import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-
/**
* Test HStoreFile
*/
@@ -213,6 +214,35 @@ public class TestHStoreFile extends HBaseTestCase {
}
@Test
+ public void testStoreFileReference() throws Exception {
+ final RegionInfo hri =
+ RegionInfoBuilder.newBuilder(TableName.valueOf("testStoreFileReference")).build();
+ HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
+ new Path(testDir, hri.getTable().getNameAsString()), hri);
+ HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
+
+ // Make a store file and write data to it.
+ StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
+ .withFilePath(regionFs.createTempName()).withFileContext(meta).build();
+ writeStoreFile(writer);
+ Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
+ writer.close();
+
+ HStoreFile file = new HStoreFile(this.fs, hsfPath, conf, cacheConf, BloomType.NONE, true);
+ file.initReader();
+ StoreFileReader r = file.getReader();
+ assertNotNull(r);
+ StoreFileScanner scanner =
+ new StoreFileScanner(r, mock(HFileScanner.class), false, false, 0, 0, false);
+
+ // Verify after instantiating scanner refCount is increased
+ assertTrue("Verify file is being referenced", file.isReferencedInReads());
+ scanner.close();
+ // Verify after closing scanner refCount is decreased
+ assertFalse("Verify file is not being referenced", file.isReferencedInReads());
+ }
+
+ @Test
public void testEmptyStoreFileRestrictKeyRanges() throws Exception {
StoreFileReader reader = mock(StoreFileReader.class);
HStore store = mock(HStore.class);
[7/8] hbase git commit: HBASE-21047 Object creation of
StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal
Khandelwal)
Posted by ap...@apache.org.
HBASE-21047 Object creation of StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal Khandelwal)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9e5a1cb2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9e5a1cb2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9e5a1cb2
Branch: refs/heads/branch-1.4
Commit: 9e5a1cb2ba71583e12f2d9fadd2bc82e911cdac8
Parents: 472a13a
Author: Andrew Purtell <ap...@apache.org>
Authored: Thu Aug 16 11:42:15 2018 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Thu Aug 16 11:43:32 2018 -0700
----------------------------------------------------------------------
.../hadoop/hbase/regionserver/StoreFile.java | 10 +++++++--
.../hbase/regionserver/StoreFileScanner.java | 1 +
.../hbase/regionserver/TestStoreFile.java | 23 ++++++++++++++++++++
3 files changed, 32 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/9e5a1cb2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 803bfb3..06cc57c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -1209,13 +1209,19 @@ public class StoreFile {
*/
public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread,
boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) {
- // Increment the ref count
- refCount.incrementAndGet();
return new StoreFileScanner(this, getScanner(cacheBlocks, pread, isCompaction), !isCompaction,
reader.hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn);
}
/**
+ * Increment the ref count associated with the reader when ever a scanner associated with the
+ * reader is opened
+ */
+ void incrementRefCount() {
+ refCount.incrementAndGet();
+ }
+
+ /**
* Decrement the ref count associated with the reader when ever a scanner associated
* with the reader is closed
*/
http://git-wip-us.apache.org/repos/asf/hbase/blob/9e5a1cb2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index b3f7fa6..7343eaf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -94,6 +94,7 @@ public class StoreFileScanner implements KeyValueScanner {
this.hasMVCCInfo = hasMVCC;
this.scannerOrder = scannerOrder;
this.canOptimizeForNonNullColumn = canOptimizeForNonNullColumn;
+ this.reader.incrementRefCount();
}
boolean isPrimaryReplica() {
http://git-wip-us.apache.org/repos/asf/hbase/blob/9e5a1cb2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
index 9c9b4b2..6bd53c6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
@@ -235,6 +235,29 @@ public class TestStoreFile extends HBaseTestCase {
}
@Test
+ public void testStoreFileReference() throws Exception {
+ Path f = new Path(ROOT_DIR, getName());
+ HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
+ // Make a store file and write data to it.
+ StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs).withFilePath(f)
+ .withFileContext(meta).build();
+
+ writeStoreFile(writer);
+ writer.close();
+
+ // Creates a reader for StoreFile
+ StoreFile.Reader reader = new StoreFile.Reader(this.fs, f, cacheConf, conf);
+ StoreFileScanner scanner =
+ new StoreFileScanner(reader, mock(HFileScanner.class), false, false, 0, 0, false);
+
+ // Verify after instantiating scanner refCount is increased
+ assertTrue(scanner.getReader().isReferencedInReads());
+ scanner.close();
+ // Verify after closing scanner refCount is decreased
+ assertFalse(scanner.getReader().isReferencedInReads());
+ }
+
+ @Test
public void testEmptyStoreFileRestrictKeyRanges() throws Exception {
StoreFile.Reader reader = mock(StoreFile.Reader.class);
Store store = mock(Store.class);
[2/8] hbase git commit: HBASE-21047 Object creation of
StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal
Khandelwal)
Posted by ap...@apache.org.
HBASE-21047 Object creation of StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal Khandelwal)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1dbd6fa9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1dbd6fa9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1dbd6fa9
Branch: refs/heads/master
Commit: 1dbd6fa993ba9f6adbbac942e39ac5cd67fa8ee2
Parents: 2cfe1e8
Author: Andrew Purtell <ap...@apache.org>
Authored: Thu Aug 16 11:42:15 2018 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Thu Aug 16 11:42:15 2018 -0700
----------------------------------------------------------------------
.../hbase/regionserver/StoreFileReader.java | 10 ++++-
.../hbase/regionserver/StoreFileScanner.java | 1 +
.../hbase/regionserver/TestHStoreFile.java | 40 +++++++++++++++++---
3 files changed, 44 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/1dbd6fa9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index db7b4f9..aeff1f8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -148,13 +148,19 @@ public class StoreFileReader {
*/
public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread,
boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) {
- // Increment the ref count
- refCount.incrementAndGet();
return new StoreFileScanner(this, getScanner(cacheBlocks, pread, isCompaction),
!isCompaction, reader.hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn);
}
/**
+ * Indicate that the scanner has started reading with this reader. We need to increment the ref
+ * count so reader is not close until some object is holding the lock
+ */
+ void incrementRefCount() {
+ refCount.incrementAndGet();
+ }
+
+ /**
* Indicate that the scanner has finished reading with this reader. We need to decrement the ref
* count, and also, if this is not the common pread reader, we should close it.
*/
http://git-wip-us.apache.org/repos/asf/hbase/blob/1dbd6fa9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index 80d0ad7..b5b853a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -94,6 +94,7 @@ public class StoreFileScanner implements KeyValueScanner {
this.hasMVCCInfo = hasMVCC;
this.scannerOrder = scannerOrder;
this.canOptimizeForNonNullColumn = canOptimizeForNonNullColumn;
+ this.reader.incrementRefCount();
}
boolean isPrimaryReplica() {
http://git-wip-us.apache.org/repos/asf/hbase/blob/1dbd6fa9/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
index 72da1a3..5cd0403 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
@@ -31,6 +31,7 @@ import java.util.Map;
import java.util.OptionalLong;
import java.util.TreeSet;
import java.util.concurrent.atomic.AtomicInteger;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -39,7 +40,6 @@ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
@@ -48,6 +48,8 @@ import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -65,6 +67,9 @@ import org.apache.hadoop.hbase.util.BloomFilterFactory;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
+import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
@@ -74,10 +79,6 @@ import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-
/**
* Test HStoreFile
*/
@@ -213,6 +214,35 @@ public class TestHStoreFile extends HBaseTestCase {
}
@Test
+ public void testStoreFileReference() throws Exception {
+ final RegionInfo hri =
+ RegionInfoBuilder.newBuilder(TableName.valueOf("testStoreFileReference")).build();
+ HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
+ new Path(testDir, hri.getTable().getNameAsString()), hri);
+ HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
+
+ // Make a store file and write data to it.
+ StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
+ .withFilePath(regionFs.createTempName()).withFileContext(meta).build();
+ writeStoreFile(writer);
+ Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
+ writer.close();
+
+ HStoreFile file = new HStoreFile(this.fs, hsfPath, conf, cacheConf, BloomType.NONE, true);
+ file.initReader();
+ StoreFileReader r = file.getReader();
+ assertNotNull(r);
+ StoreFileScanner scanner =
+ new StoreFileScanner(r, mock(HFileScanner.class), false, false, 0, 0, false);
+
+ // Verify after instantiating scanner refCount is increased
+ assertTrue("Verify file is being referenced", file.isReferencedInReads());
+ scanner.close();
+ // Verify after closing scanner refCount is decreased
+ assertFalse("Verify file is not being referenced", file.isReferencedInReads());
+ }
+
+ @Test
public void testEmptyStoreFileRestrictKeyRanges() throws Exception {
StoreFileReader reader = mock(StoreFileReader.class);
HStore store = mock(HStore.class);
[5/8] hbase git commit: HBASE-21047 Object creation of
StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal
Khandelwal)
Posted by ap...@apache.org.
HBASE-21047 Object creation of StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal Khandelwal)
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7564efaf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7564efaf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7564efaf
Branch: refs/heads/branch-2.0
Commit: 7564efaf9351f40e26458fa25155858db725faa1
Parents: 252f1bc
Author: Andrew Purtell <ap...@apache.org>
Authored: Thu Aug 16 11:42:15 2018 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Thu Aug 16 11:42:58 2018 -0700
----------------------------------------------------------------------
.../hbase/regionserver/StoreFileReader.java | 10 ++++-
.../hbase/regionserver/StoreFileScanner.java | 1 +
.../hbase/regionserver/TestHStoreFile.java | 40 +++++++++++++++++---
3 files changed, 44 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/7564efaf/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index db7b4f9..aeff1f8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -148,13 +148,19 @@ public class StoreFileReader {
*/
public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread,
boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) {
- // Increment the ref count
- refCount.incrementAndGet();
return new StoreFileScanner(this, getScanner(cacheBlocks, pread, isCompaction),
!isCompaction, reader.hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn);
}
/**
+ * Indicate that the scanner has started reading with this reader. We need to increment the ref
+ * count so reader is not close until some object is holding the lock
+ */
+ void incrementRefCount() {
+ refCount.incrementAndGet();
+ }
+
+ /**
* Indicate that the scanner has finished reading with this reader. We need to decrement the ref
* count, and also, if this is not the common pread reader, we should close it.
*/
http://git-wip-us.apache.org/repos/asf/hbase/blob/7564efaf/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index 80d0ad7..b5b853a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -94,6 +94,7 @@ public class StoreFileScanner implements KeyValueScanner {
this.hasMVCCInfo = hasMVCC;
this.scannerOrder = scannerOrder;
this.canOptimizeForNonNullColumn = canOptimizeForNonNullColumn;
+ this.reader.incrementRefCount();
}
boolean isPrimaryReplica() {
http://git-wip-us.apache.org/repos/asf/hbase/blob/7564efaf/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
index 72da1a3..5cd0403 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
@@ -31,6 +31,7 @@ import java.util.Map;
import java.util.OptionalLong;
import java.util.TreeSet;
import java.util.concurrent.atomic.AtomicInteger;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -39,7 +40,6 @@ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
@@ -48,6 +48,8 @@ import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -65,6 +67,9 @@ import org.apache.hadoop.hbase.util.BloomFilterFactory;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
+import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
@@ -74,10 +79,6 @@ import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-
/**
* Test HStoreFile
*/
@@ -213,6 +214,35 @@ public class TestHStoreFile extends HBaseTestCase {
}
@Test
+ public void testStoreFileReference() throws Exception {
+ final RegionInfo hri =
+ RegionInfoBuilder.newBuilder(TableName.valueOf("testStoreFileReference")).build();
+ HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
+ new Path(testDir, hri.getTable().getNameAsString()), hri);
+ HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
+
+ // Make a store file and write data to it.
+ StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
+ .withFilePath(regionFs.createTempName()).withFileContext(meta).build();
+ writeStoreFile(writer);
+ Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
+ writer.close();
+
+ HStoreFile file = new HStoreFile(this.fs, hsfPath, conf, cacheConf, BloomType.NONE, true);
+ file.initReader();
+ StoreFileReader r = file.getReader();
+ assertNotNull(r);
+ StoreFileScanner scanner =
+ new StoreFileScanner(r, mock(HFileScanner.class), false, false, 0, 0, false);
+
+ // Verify after instantiating scanner refCount is increased
+ assertTrue("Verify file is being referenced", file.isReferencedInReads());
+ scanner.close();
+ // Verify after closing scanner refCount is decreased
+ assertFalse("Verify file is not being referenced", file.isReferencedInReads());
+ }
+
+ @Test
public void testEmptyStoreFileRestrictKeyRanges() throws Exception {
StoreFileReader reader = mock(StoreFileReader.class);
HStore store = mock(HStore.class);