You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by zh...@apache.org on 2021/11/06 15:04:38 UTC
[hbase] 08/12: HBASE-26280 Use store file tracker when snapshotting
(#3685)
(#3685)
This is an automated email from the ASF dual-hosted git repository.
zhangduo pushed a commit to branch HBASE-26067
in repository https://gitbox.apache.org/repos/asf/hbase.git
commit 08ba5e99d411d4a7868818390f08cf0af9517d4f
Author: Duo Zhang <zh...@apache.org>
AuthorDate: Fri Sep 17 09:40:44 2021 +0800
HBASE-26280 Use store file tracker when snapshotting (#3685)
Signed-off-by: Wellington Chevreuil <wc...@apache.org>
Reviewed-by: Josh Elser <el...@apache.org>
---
.../assignment/MergeTableRegionsProcedure.java | 5 +-
.../assignment/SplitTableRegionProcedure.java | 5 +-
.../hbase/regionserver/HRegionFileSystem.java | 10 +--
.../storefiletracker/StoreFileTrackerFactory.java | 17 ++---
.../hadoop/hbase/snapshot/SnapshotManifest.java | 42 ++++++------
...oneSnapshotFromClientCloneLinksAfterDelete.java | 4 +-
.../hbase/client/TestMobSnapshotFromClient.java | 7 +-
.../hbase/client/TestSnapshotFromClient.java | 30 +++++++--
.../hadoop/hbase/regionserver/TestHStoreFile.java | 6 +-
.../hbase/snapshot/MobSnapshotTestingUtils.java | 74 +++++++++++-----------
.../hbase/snapshot/SnapshotTestingUtils.java | 16 ++---
11 files changed, 107 insertions(+), 109 deletions(-)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index e9051da..0f41db5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -612,9 +612,8 @@ public class MergeTableRegionsProcedure
List<Path> mergedFiles = new ArrayList<>();
for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
String family = hcd.getNameAsString();
- Configuration trackerConfig =
- StoreFileTrackerFactory.mergeConfigurations(env.getMasterConfiguration(), htd, hcd);
- StoreFileTracker tracker = StoreFileTrackerFactory.create(trackerConfig, family, regionFs);
+ StoreFileTracker tracker =
+ StoreFileTrackerFactory.create(env.getMasterConfiguration(), htd, hcd, regionFs);
final Collection<StoreFileInfo> storeFiles = tracker.load();
if (storeFiles != null && storeFiles.size() > 0) {
final Configuration storeConfiguration =
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 15aeb29..52addd5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -667,9 +667,8 @@ public class SplitTableRegionProcedure
new HashMap<String, Collection<StoreFileInfo>>(htd.getColumnFamilyCount());
for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) {
String family = cfd.getNameAsString();
- Configuration trackerConfig = StoreFileTrackerFactory.
- mergeConfigurations(env.getMasterConfiguration(), htd, htd.getColumnFamily(cfd.getName()));
- StoreFileTracker tracker = StoreFileTrackerFactory.create(trackerConfig, family, regionFs);
+ StoreFileTracker tracker =
+ StoreFileTrackerFactory.create(env.getMasterConfiguration(), htd, cfd, regionFs);
Collection<StoreFileInfo> sfis = tracker.load();
if (sfis == null) {
continue;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
index da0fdcb..4bcebd9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
@@ -595,7 +595,6 @@ public class HRegionFileSystem {
* to the proper location in the filesystem.
*
* @param regionInfo daughter {@link org.apache.hadoop.hbase.client.RegionInfo}
- * @throws IOException
*/
public Path commitDaughterRegion(final RegionInfo regionInfo, List<Path> allRegionFiles,
MasterProcedureEnv env) throws IOException {
@@ -622,12 +621,8 @@ public class HRegionFileSystem {
Map<String, List<StoreFileInfo>> fileInfoMap = new HashMap<>();
for(Path file : allFiles) {
String familyName = file.getParent().getName();
- trackerMap.computeIfAbsent(familyName, t -> {
- Configuration config = StoreFileTrackerFactory.mergeConfigurations(conf, tblDesc,
- tblDesc.getColumnFamily(Bytes.toBytes(familyName)));
- return StoreFileTrackerFactory.
- create(config, familyName, regionFs);
- });
+ trackerMap.computeIfAbsent(familyName, t -> StoreFileTrackerFactory.create(conf, tblDesc,
+ tblDesc.getColumnFamily(Bytes.toBytes(familyName)), regionFs));
fileInfoMap.computeIfAbsent(familyName, l -> new ArrayList<>());
List<StoreFileInfo> infos = fileInfoMap.get(familyName);
infos.add(new StoreFileInfo(conf, fs, file, true));
@@ -673,7 +668,6 @@ public class HRegionFileSystem {
* this method is invoked on the Master side, then the RegionSplitPolicy will
* NOT have a reference to a Region.
* @return Path to created reference.
- * @throws IOException
*/
public Path splitStoreFile(RegionInfo hri, String familyName, HStoreFile f, byte[] splitRow,
boolean top, RegionSplitPolicy splitPolicy) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java
index 90704fe..b586027 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerFactory.java
@@ -22,13 +22,11 @@ import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.StoreContext;
import org.apache.hadoop.hbase.regionserver.StoreUtils;
-import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
@@ -113,16 +111,15 @@ public final class StoreFileTrackerFactory {
* Used at master side when splitting/merging regions, as we do not have a Store, thus no
* StoreContext at master side.
*/
- public static StoreFileTracker create(Configuration conf, String family,
- HRegionFileSystem regionFs) {
- ColumnFamilyDescriptorBuilder fDescBuilder =
- ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family));
- StoreContext ctx = StoreContext.getBuilder().withColumnFamilyDescriptor(fDescBuilder.build())
- .withRegionFileSystem(regionFs).build();
- return StoreFileTrackerFactory.create(conf, true, ctx);
+ public static StoreFileTracker create(Configuration conf, TableDescriptor td,
+ ColumnFamilyDescriptor cfd, HRegionFileSystem regionFs) {
+ StoreContext ctx =
+ StoreContext.getBuilder().withColumnFamilyDescriptor(cfd).withRegionFileSystem(regionFs)
+ .withFamilyStoreDirectoryPath(regionFs.getStoreDir(cfd.getNameAsString())).build();
+ return StoreFileTrackerFactory.create(mergeConfigurations(conf, td, cfd), true, ctx);
}
- public static Configuration mergeConfigurations(Configuration global, TableDescriptor table,
+ private static Configuration mergeConfigurations(Configuration global, TableDescriptor table,
ColumnFamilyDescriptor family) {
return StoreUtils.createStoreConfiguration(global, table, family);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index 5e82cad..f154aa9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -47,7 +47,8 @@ import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.Threads;
@@ -291,8 +292,8 @@ public final class SnapshotManifest {
addRegion(tableDir, regionInfo, visitor);
}
- protected void addRegion(final Path tableDir, final RegionInfo regionInfo, RegionVisitor visitor)
- throws IOException {
+ protected void addRegion(Path tableDir, RegionInfo regionInfo, RegionVisitor visitor)
+ throws IOException {
boolean isMobRegion = MobUtils.isMobRegionInfo(regionInfo);
try {
Path baseDir = tableDir;
@@ -300,8 +301,8 @@ public final class SnapshotManifest {
if (isMobRegion) {
baseDir = CommonFSUtils.getTableDir(MobUtils.getMobHome(conf), regionInfo.getTable());
}
- HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, rootFs,
- baseDir, regionInfo, true);
+ HRegionFileSystem regionFs =
+ HRegionFileSystem.openRegionFromFileSystem(conf, rootFs, baseDir, regionInfo, true);
monitor.rethrowException();
// 1. dump region meta info into the snapshot directory
@@ -317,26 +318,19 @@ public final class SnapshotManifest {
// in batches and may miss files being added/deleted. This could be more robust (iteratively
// checking to see if we have all the files until we are sure), but the limit is currently
// 1000 files/batch, far more than the number of store files under a single column family.
- Collection<String> familyNames = regionFs.getFamilies();
- if (familyNames != null) {
- for (String familyName: familyNames) {
- Object familyData = visitor.familyOpen(regionData, Bytes.toBytes(familyName));
- monitor.rethrowException();
-
- Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName);
- if (storeFiles == null) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("No files under family: " + familyName);
- }
- continue;
- }
-
- // 2.1. build the snapshot reference for the store
- // iterate through all the store's files and create "references".
- addReferenceFiles(visitor, regionData, familyData, storeFiles, false);
-
- visitor.familyClose(regionData, familyData);
+ for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) {
+ Object familyData = visitor.familyOpen(regionData, cfd.getName());
+ monitor.rethrowException();
+ StoreFileTracker tracker = StoreFileTrackerFactory.create(conf, htd, cfd, regionFs);
+ List<StoreFileInfo> storeFiles = tracker.load();
+ if (storeFiles.isEmpty()) {
+ LOG.debug("No files under family: {}", cfd.getNameAsString());
+ continue;
}
+ // 2.1. build the snapshot reference for the store
+ // iterate through all the store's files and create "references".
+ addReferenceFiles(visitor, regionData, familyData, storeFiles, false);
+ visitor.familyClose(regionData, familyData);
}
visitor.regionClose(regionData);
} catch (IOException e) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientCloneLinksAfterDelete.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientCloneLinksAfterDelete.java
index c2087a9..e352303 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientCloneLinksAfterDelete.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientCloneLinksAfterDelete.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -93,7 +94,8 @@ public class TestMobCloneSnapshotFromClientCloneLinksAfterDelete
@Override
protected void createTable() throws IOException, InterruptedException {
MobSnapshotTestingUtils.createMobTable(TEST_UTIL, tableName,
- SnapshotTestingUtils.getSplitKeys(), getNumReplicas(), DelayFlushCoprocessor.class.getName(),
+ SnapshotTestingUtils.getSplitKeys(), getNumReplicas(),
+ StoreFileTrackerFactory.Trackers.DEFAULT.name(), DelayFlushCoprocessor.class.getName(),
FAMILY);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java
index cdc41b0..0695be1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java
@@ -26,8 +26,6 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.experimental.categories.Category;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Test create/using/deleting snapshots from the client
@@ -41,8 +39,6 @@ public class TestMobSnapshotFromClient extends TestSnapshotFromClient {
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestMobSnapshotFromClient.class);
- private static final Logger LOG = LoggerFactory.getLogger(TestMobSnapshotFromClient.class);
-
/**
* Setup the config for the cluster
* @throws Exception on failure
@@ -60,6 +56,7 @@ public class TestMobSnapshotFromClient extends TestSnapshotFromClient {
@Override
protected void createTable() throws Exception {
- MobSnapshotTestingUtils.createMobTable(UTIL, TABLE_NAME, getNumReplicas(), TEST_FAM);
+ MobSnapshotTestingUtils.createMobTable(UTIL, TABLE_NAME, getNumReplicas(), trackerImpl.name(),
+ TEST_FAM);
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
index 08e33ac..56a48c1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
@@ -32,9 +33,11 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNameTestRule;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
import org.apache.hadoop.hbase.snapshot.SnapshotManifestV1;
@@ -51,7 +54,10 @@ import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
-import org.junit.rules.TestName;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -64,7 +70,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
* <p>
* This is an end-to-end test for the snapshot utility
*/
-@Category({LargeTests.class, ClientTests.class})
+@RunWith(Parameterized.class)
+@Category({ LargeTests.class, ClientTests.class })
public class TestSnapshotFromClient {
@ClassRule
@@ -82,7 +89,16 @@ public class TestSnapshotFromClient {
private static final Pattern MATCH_ALL = Pattern.compile(".*");
@Rule
- public TestName name = new TestName();
+ public TableNameTestRule name = new TableNameTestRule();
+
+ @Parameter
+ public StoreFileTrackerFactory.Trackers trackerImpl;
+
+ @Parameters(name = "{index}: tracker={0}")
+ public static List<Object[]> params() {
+ return Arrays.asList(new Object[] { StoreFileTrackerFactory.Trackers.DEFAULT },
+ new Object[] { StoreFileTrackerFactory.Trackers.FILE });
+ }
/**
* Setup the config for the cluster
@@ -109,7 +125,6 @@ public class TestSnapshotFromClient {
conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
ConstantSizeRegionSplitPolicy.class.getName());
-
}
@Before
@@ -119,7 +134,8 @@ public class TestSnapshotFromClient {
protected void createTable() throws Exception {
TableDescriptor htd =
- TableDescriptorBuilder.newBuilder(TABLE_NAME).setRegionReplication(getNumReplicas()).build();
+ TableDescriptorBuilder.newBuilder(TABLE_NAME).setRegionReplication(getNumReplicas())
+ .setValue(StoreFileTrackerFactory.TRACKER_IMPL, trackerImpl.name()).build();
UTIL.createTable(htd, new byte[][] { TEST_FAM }, null);
}
@@ -316,7 +332,7 @@ public class TestSnapshotFromClient {
@Test
public void testListTableSnapshots() throws Exception {
Admin admin = null;
- final TableName tableName = TableName.valueOf(name.getMethodName());
+ final TableName tableName = name.getTableName();
try {
admin = UTIL.getAdmin();
@@ -401,7 +417,7 @@ public class TestSnapshotFromClient {
@Test
public void testDeleteTableSnapshots() throws Exception {
Admin admin = null;
- final TableName tableName = TableName.valueOf(name.getMethodName());
+ final TableName tableName = name.getTableName();
try {
admin = UTIL.getAdmin();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
index c7203a9..b4f628b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -1068,10 +1069,9 @@ public class TestHStoreFile {
when(mockEnv.getMasterConfiguration()).thenReturn(new Configuration());
TableDescriptors mockTblDescs = mock(TableDescriptors.class);
when(mockServices.getTableDescriptors()).thenReturn(mockTblDescs);
- TableDescriptor mockTblDesc = mock(TableDescriptor.class);
+ TableDescriptor mockTblDesc = TableDescriptorBuilder.newBuilder(hri.getTable())
+ .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
when(mockTblDescs.get(any())).thenReturn(mockTblDesc);
- ColumnFamilyDescriptor mockCfDesc = mock(ColumnFamilyDescriptor.class);
- when(mockTblDesc.getColumnFamily(any())).thenReturn(mockCfDesc);
Path regionDir = regionFs.commitDaughterRegion(hri, splitFiles, mockEnv);
return new Path(new Path(regionDir, family), path.getName());
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
index fea4fb4..7523ae8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Assert;
@@ -45,29 +46,40 @@ public class MobSnapshotTestingUtils {
/**
* Create the Mob Table.
*/
- public static void createMobTable(final HBaseTestingUtil util,
- final TableName tableName, int regionReplication,
- final byte[]... families) throws IOException, InterruptedException {
- createMobTable(util, tableName, SnapshotTestingUtils.getSplitKeys(),
- regionReplication, families);
+ public static void createMobTable(final HBaseTestingUtil util, final TableName tableName,
+ int regionReplication, final byte[]... families) throws IOException, InterruptedException {
+ createMobTable(util, tableName, SnapshotTestingUtils.getSplitKeys(), regionReplication,
+ StoreFileTrackerFactory.Trackers.DEFAULT.name(), families);
+ }
+
+ public static void createMobTable(final HBaseTestingUtil util, final TableName tableName,
+ int regionReplication, String storeFileTracker, final byte[]... families)
+ throws IOException, InterruptedException {
+ createMobTable(util, tableName, SnapshotTestingUtils.getSplitKeys(), regionReplication,
+ storeFileTracker, families);
}
- public static void createPreSplitMobTable(final HBaseTestingUtil util,
- final TableName tableName, int nRegions, final byte[]... families)
- throws IOException, InterruptedException {
- createMobTable(util, tableName, SnapshotTestingUtils.getSplitKeys(nRegions),
- 1, families);
+ public static void createPreSplitMobTable(final HBaseTestingUtil util, final TableName tableName,
+ int nRegions, final byte[]... families) throws IOException, InterruptedException {
+ createMobTable(util, tableName, SnapshotTestingUtils.getSplitKeys(nRegions), 1, families);
+ }
+
+ public static void createMobTable(final HBaseTestingUtil util, final TableName tableName,
+ final byte[][] splitKeys, int regionReplication, final byte[]... families)
+ throws IOException, InterruptedException {
+ createMobTable(util, tableName, splitKeys, regionReplication,
+ StoreFileTrackerFactory.Trackers.DEFAULT.name(), families);
}
public static void createMobTable(final HBaseTestingUtil util, final TableName tableName,
- final byte[][] splitKeys, int regionReplication, final byte[]... families)
- throws IOException, InterruptedException {
- createMobTable(util, tableName, splitKeys, regionReplication, null, families);
+ final byte[][] splitKeys, int regionReplication, String storeFileTracker,
+ final byte[]... families) throws IOException, InterruptedException {
+ createMobTable(util, tableName, splitKeys, regionReplication, storeFileTracker, null, families);
}
- public static void createMobTable(HBaseTestingUtil util, TableName tableName,
- byte[][] splitKeys, int regionReplication, String cpClassName, byte[]... families)
- throws IOException, InterruptedException {
+ public static void createMobTable(HBaseTestingUtil util, TableName tableName, byte[][] splitKeys,
+ int regionReplication, String storeFileTracker, String cpClassName, byte[]... families)
+ throws IOException, InterruptedException {
TableDescriptorBuilder builder =
TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(regionReplication);
for (byte[] family : families) {
@@ -77,6 +89,7 @@ public class MobSnapshotTestingUtils {
if (!StringUtils.isBlank(cpClassName)) {
builder.setCoprocessor(cpClassName);
}
+ builder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, storeFileTracker);
util.getAdmin().createTable(builder.build(), splitKeys);
SnapshotTestingUtils.waitForTableToBeOnline(util, tableName);
assertEquals((splitKeys.length + 1) * regionReplication,
@@ -85,15 +98,10 @@ public class MobSnapshotTestingUtils {
/**
* Create a Mob table.
- *
- * @param util
- * @param tableName
- * @param families
* @return An Table instance for the created table.
- * @throws IOException
*/
- public static Table createMobTable(final HBaseTestingUtil util,
- final TableName tableName, final byte[]... families) throws IOException {
+ public static Table createMobTable(final HBaseTestingUtil util, final TableName tableName,
+ final byte[]... families) throws IOException {
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
for (byte[] family : families) {
// Disable blooms (they are on by default as of 0.95) but we disable them
@@ -102,10 +110,7 @@ public class MobSnapshotTestingUtils {
// and blooms being
// on is interfering.
builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family)
- .setBloomFilterType(BloomType.NONE)
- .setMobEnabled(true)
- .setMobThreshold(0L)
- .build());
+ .setBloomFilterType(BloomType.NONE).setMobEnabled(true).setMobThreshold(0L).build());
}
util.getAdmin().createTable(builder.build());
// HBaseAdmin only waits for regions to appear in hbase:meta we should wait
@@ -135,8 +140,8 @@ public class MobSnapshotTestingUtils {
}
}
- public static void verifyMobRowCount(final HBaseTestingUtil util,
- final TableName tableName, long expectedRows) throws IOException {
+ public static void verifyMobRowCount(final HBaseTestingUtil util, final TableName tableName,
+ long expectedRows) throws IOException {
Table table = ConnectionFactory.createConnection(util.getConfiguration()).getTable(tableName);
try {
@@ -156,13 +161,10 @@ public class MobSnapshotTestingUtils {
@Override
public TableDescriptor createHtd(final String tableName) {
- return TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName))
- .setColumnFamily(ColumnFamilyDescriptorBuilder
- .newBuilder(Bytes.toBytes(TEST_FAMILY))
- .setMobEnabled(true)
- .setMobThreshold(0L)
- .build())
- .build();
+ return TableDescriptorBuilder
+ .newBuilder(TableName.valueOf(tableName)).setColumnFamily(ColumnFamilyDescriptorBuilder
+ .newBuilder(Bytes.toBytes(TEST_FAMILY)).setMobEnabled(true).setMobThreshold(0L).build())
+ .build();
}
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index d8d2a5e..00d2e84 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -480,9 +480,8 @@ public final class SnapshotTestingUtils {
this.desc = desc;
this.tableRegions = tableRegions;
this.snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir, conf);
- new FSTableDescriptors(conf)
- .createTableDescriptorForTableDirectory(this.snapshotDir.getFileSystem(conf),
- snapshotDir, htd, false);
+ FSTableDescriptors.createTableDescriptorForTableDirectory(
+ this.snapshotDir.getFileSystem(conf), snapshotDir, htd, false);
}
public TableDescriptor getTableDescriptor() {
@@ -502,15 +501,13 @@ public final class SnapshotTestingUtils {
}
public Path[] addRegionV1() throws IOException {
- return addRegion(desc.toBuilder()
- .setVersion(SnapshotManifestV1.DESCRIPTOR_VERSION)
- .build());
+ return addRegion(
+ desc.toBuilder().setVersion(SnapshotManifestV1.DESCRIPTOR_VERSION).build());
}
public Path[] addRegionV2() throws IOException {
- return addRegion(desc.toBuilder()
- .setVersion(SnapshotManifestV2.DESCRIPTOR_VERSION)
- .build());
+ return addRegion(
+ desc.toBuilder().setVersion(SnapshotManifestV2.DESCRIPTOR_VERSION).build());
}
private Path[] addRegion(final SnapshotProtos.SnapshotDescription desc) throws IOException {
@@ -521,6 +518,7 @@ public final class SnapshotTestingUtils {
RegionData regionData = tableRegions[this.snapshotted++];
ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(desc.getName());
SnapshotManifest manifest = SnapshotManifest.create(conf, fs, snapshotDir, desc, monitor);
+ manifest.addTableDescriptor(htd);
manifest.addRegion(regionData.tableDir, regionData.hri);
return regionData.files;
}