You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by jm...@apache.org on 2015/07/22 21:52:25 UTC
[07/50] [abbrv] hbase git commit: Merge branch 'apache/master'
(4/16/15) into hbase-11339
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
index b7af75e,0000000..d891c20
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java
@@@ -1,349 -1,0 +1,347 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.*;
- import org.apache.hadoop.hbase.client.HTable;
- import org.apache.hadoop.hbase.client.Result;
- import org.apache.hadoop.hbase.client.ResultScanner;
- import org.apache.hadoop.hbase.client.Scan;
++import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.Assert;
+
+public class MobSnapshotTestingUtils {
+
+ /**
+ * Create the Mob Table.
+ */
+ public static void createMobTable(final HBaseTestingUtility util,
+ final TableName tableName, int regionReplication,
+ final byte[]... families) throws IOException, InterruptedException {
+ HTableDescriptor htd = new HTableDescriptor(tableName);
+ htd.setRegionReplication(regionReplication);
+ for (byte[] family : families) {
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(0L);
+ htd.addFamily(hcd);
+ }
+ byte[][] splitKeys = SnapshotTestingUtils.getSplitKeys();
+ util.getHBaseAdmin().createTable(htd, splitKeys);
+ SnapshotTestingUtils.waitForTableToBeOnline(util, tableName);
+ assertEquals((splitKeys.length + 1) * regionReplication, util
+ .getHBaseAdmin().getTableRegions(tableName).size());
+ }
+
+ /**
+ * Create a Mob table.
+ *
+ * @param util
+ * @param tableName
+ * @param families
+ * @return An HTable instance for the created table.
+ * @throws IOException
+ */
- public static HTable createMobTable(final HBaseTestingUtility util,
++ public static Table createMobTable(final HBaseTestingUtility util,
+ final TableName tableName, final byte[]... families) throws IOException {
+ HTableDescriptor htd = new HTableDescriptor(tableName);
+ for (byte[] family : families) {
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ // Disable blooms (they are on by default as of 0.95) but we disable them
+ // here because
+ // tests have hard coded counts of what to expect in block cache, etc.,
+ // and blooms being
+ // on is interfering.
+ hcd.setBloomFilterType(BloomType.NONE);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(0L);
+ htd.addFamily(hcd);
+ }
+ util.getHBaseAdmin().createTable(htd);
+    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait
+    // until they are assigned
+ util.waitUntilAllRegionsAssigned(htd.getTableName());
- return new HTable(util.getConfiguration(), htd.getTableName());
++ return ConnectionFactory.createConnection(util.getConfiguration()).getTable(htd.getTableName());
+ }
+
+ /**
+ * Return the number of rows in the given table.
+ */
- public static int countMobRows(final HTable table) throws IOException {
++ public static int countMobRows(final Table table) throws IOException {
+ Scan scan = new Scan();
+ ResultScanner results = table.getScanner(scan);
+ int count = 0;
+ for (Result res : results) {
+ count++;
+ List<Cell> cells = res.listCells();
+ for (Cell cell : cells) {
+ // Verify the value
+ Assert.assertTrue(CellUtil.cloneValue(cell).length > 0);
+ }
+ }
+ results.close();
+ return count;
+ }
+
+ /**
+ * Return the number of rows in the given table.
+ */
- public static int countMobRows(final HTable table, final byte[]... families)
++ public static int countMobRows(final Table table, final byte[]... families)
+ throws IOException {
+ Scan scan = new Scan();
+ for (byte[] family : families) {
+ scan.addFamily(family);
+ }
+ ResultScanner results = table.getScanner(scan);
+ int count = 0;
+ for (Result res : results) {
+ count++;
+ List<Cell> cells = res.listCells();
+ for (Cell cell : cells) {
+ // Verify the value
+ Assert.assertTrue(CellUtil.cloneValue(cell).length > 0);
+ }
+ }
+ results.close();
+ return count;
+ }
+
+ public static void verifyMobRowCount(final HBaseTestingUtility util,
+ final TableName tableName, long expectedRows) throws IOException {
- HTable table = new HTable(util.getConfiguration(), tableName);
++
++ Table table = ConnectionFactory.createConnection(util.getConfiguration()).getTable(tableName);
+ try {
+ assertEquals(expectedRows, countMobRows(table));
+ } finally {
+ table.close();
+ }
+ }
+
+ // ==========================================================================
+ // Snapshot Mock
+ // ==========================================================================
+ public static class SnapshotMock {
+ private final static String TEST_FAMILY = "cf";
+ public final static int TEST_NUM_REGIONS = 4;
+
+ private final Configuration conf;
+ private final FileSystem fs;
+ private final Path rootDir;
+
+ static class RegionData {
+ public HRegionInfo hri;
+ public Path tableDir;
+ public Path[] files;
+
+ public RegionData(final Path tableDir, final HRegionInfo hri,
+ final int nfiles) {
+ this.tableDir = tableDir;
+ this.hri = hri;
+ this.files = new Path[nfiles];
+ }
+ }
+
+ public static class SnapshotBuilder {
+ private final RegionData[] tableRegions;
+ private final SnapshotDescription desc;
+ private final HTableDescriptor htd;
+ private final Configuration conf;
+ private final FileSystem fs;
+ private final Path rootDir;
+ private Path snapshotDir;
+ private int snapshotted = 0;
+
+ public SnapshotBuilder(final Configuration conf, final FileSystem fs,
+ final Path rootDir, final HTableDescriptor htd,
+ final SnapshotDescription desc, final RegionData[] tableRegions)
+ throws IOException {
+ this.fs = fs;
+ this.conf = conf;
+ this.rootDir = rootDir;
+ this.htd = htd;
+ this.desc = desc;
+ this.tableRegions = tableRegions;
+ this.snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc,
+ rootDir);
+ new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(
+ snapshotDir, new TableDescriptor(htd), false);
+ }
+
+ public HTableDescriptor getTableDescriptor() {
+ return this.htd;
+ }
+
+ public SnapshotDescription getSnapshotDescription() {
+ return this.desc;
+ }
+
+ public Path getSnapshotsDir() {
+ return this.snapshotDir;
+ }
+
+ public Path[] addRegion() throws IOException {
+ return addRegion(desc);
+ }
+
+ public Path[] addRegionV1() throws IOException {
+ return addRegion(desc.toBuilder()
+ .setVersion(SnapshotManifestV1.DESCRIPTOR_VERSION).build());
+ }
+
+ public Path[] addRegionV2() throws IOException {
+ return addRegion(desc.toBuilder()
+ .setVersion(SnapshotManifestV2.DESCRIPTOR_VERSION).build());
+ }
+
+ private Path[] addRegion(final SnapshotDescription desc)
+ throws IOException {
+ if (this.snapshotted == tableRegions.length) {
+ throw new UnsupportedOperationException(
+ "No more regions in the table");
+ }
+
+ RegionData regionData = tableRegions[this.snapshotted++];
+ ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(
+ desc.getName());
+ SnapshotManifest manifest = SnapshotManifest.create(conf, fs,
+ snapshotDir, desc, monitor);
+ manifest.addRegion(regionData.tableDir, regionData.hri);
+ return regionData.files;
+ }
+
+ public Path commit() throws IOException {
+ ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(
+ desc.getName());
+ SnapshotManifest manifest = SnapshotManifest.create(conf, fs,
+ snapshotDir, desc, monitor);
+ manifest.addTableDescriptor(htd);
+ manifest.consolidate();
+ SnapshotDescriptionUtils.completeSnapshot(desc, rootDir, snapshotDir,
+ fs);
+ snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(desc,
+ rootDir);
+ return snapshotDir;
+ }
+ }
+
+ public SnapshotMock(final Configuration conf, final FileSystem fs,
+ final Path rootDir) {
+ this.fs = fs;
+ this.conf = conf;
+ this.rootDir = rootDir;
+ }
+
+ public SnapshotBuilder createSnapshotV1(final String snapshotName)
+ throws IOException {
+ return createSnapshot(snapshotName, SnapshotManifestV1.DESCRIPTOR_VERSION);
+ }
+
+ public SnapshotBuilder createSnapshotV2(final String snapshotName)
+ throws IOException {
+ return createSnapshot(snapshotName, SnapshotManifestV2.DESCRIPTOR_VERSION);
+ }
+
+ private SnapshotBuilder createSnapshot(final String snapshotName,
+ final int version) throws IOException {
+ HTableDescriptor htd = createHtd(snapshotName);
+
+ RegionData[] regions = createTable(htd, TEST_NUM_REGIONS);
+
+ SnapshotDescription desc = SnapshotDescription.newBuilder()
+ .setTable(htd.getNameAsString()).setName(snapshotName)
+ .setVersion(version).build();
+
+ Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc,
+ rootDir);
+ SnapshotDescriptionUtils.writeSnapshotInfo(desc, workingDir, fs);
+ return new SnapshotBuilder(conf, fs, rootDir, htd, desc, regions);
+ }
+
+ public HTableDescriptor createHtd(final String tableName) {
+ HTableDescriptor htd = new HTableDescriptor(tableName);
+ HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(0L);
+ htd.addFamily(hcd);
+ return htd;
+ }
+
+ private RegionData[] createTable(final HTableDescriptor htd,
+ final int nregions) throws IOException {
+ Path tableDir = FSUtils.getTableDir(rootDir, htd.getTableName());
+ new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(
+ tableDir, new TableDescriptor(htd), false);
+
+ assertTrue(nregions % 2 == 0);
+ RegionData[] regions = new RegionData[nregions];
+ for (int i = 0; i < regions.length; i += 2) {
+ byte[] startKey = Bytes.toBytes(0 + i * 2);
+ byte[] endKey = Bytes.toBytes(1 + i * 2);
+
+ // First region, simple with one plain hfile.
+ HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKey, endKey);
+ HRegionFileSystem rfs = HRegionFileSystem.createRegionOnFileSystem(
+ conf, fs, tableDir, hri);
+ regions[i] = new RegionData(tableDir, hri, 3);
+ for (int j = 0; j < regions[i].files.length; ++j) {
+ Path storeFile = createStoreFile(rfs.createTempName());
+ regions[i].files[j] = rfs.commitStoreFile(TEST_FAMILY, storeFile);
+ }
+
+ // Second region, used to test the split case.
+ // This region contains a reference to the hfile in the first region.
+ startKey = Bytes.toBytes(2 + i * 2);
+ endKey = Bytes.toBytes(3 + i * 2);
+ hri = new HRegionInfo(htd.getTableName());
+ rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir,
+ hri);
+ regions[i + 1] = new RegionData(tableDir, hri, regions[i].files.length);
+ for (int j = 0; j < regions[i].files.length; ++j) {
+ String refName = regions[i].files[j].getName() + '.'
+ + regions[i].hri.getEncodedName();
+ Path refFile = createStoreFile(new Path(rootDir, refName));
+ regions[i + 1].files[j] = rfs.commitStoreFile(TEST_FAMILY, refFile);
+ }
+ }
+ return regions;
+ }
+
+ private Path createStoreFile(final Path storeFile) throws IOException {
+ FSDataOutputStream out = fs.create(storeFile);
+ try {
+ out.write(Bytes.toBytes(storeFile.toString()));
+ } finally {
+ out.close();
+ }
+ return storeFile;
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java
index 5517f4a,0000000..f7a9918
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java
@@@ -1,551 -1,0 +1,548 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
- import org.apache.hadoop.hbase.client.Admin;
- import org.apache.hadoop.hbase.client.HTable;
- import org.apache.hadoop.hbase.client.ScannerCallable;
- import org.apache.hadoop.hbase.ipc.RpcClient;
++import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.log4j.Level;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test creating/using/deleting snapshots from the client
+ * <p>
+ * This is an end-to-end test for the snapshot utility
+ *
+ * TODO This is essentially a clone of TestSnapshotFromClient. This is worth refactoring this
+ * because there will be a few more flavors of snapshots that need to run these tests.
+ */
+@Category({ClientTests.class, LargeTests.class})
+public class TestMobFlushSnapshotFromClient {
+ private static final Log LOG = LogFactory.getLog(TestFlushSnapshotFromClient.class);
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+ private static final int NUM_RS = 2;
+ private static final String STRING_TABLE_NAME = "test";
+ private static final byte[] TEST_FAM = Bytes.toBytes("fam");
+ private static final TableName TABLE_NAME =
+ TableName.valueOf(STRING_TABLE_NAME);
+ private final int DEFAULT_NUM_ROWS = 100;
+
+ /**
+ * Setup the config for the cluster
+ * @throws Exception on failure
+ */
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ ((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL);
+ ((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(NUM_RS);
+ }
+
+ private static void setupConf(Configuration conf) {
+ // disable the ui
+ conf.setInt("hbase.regionsever.info.port", -1);
+ // change the flush size to a small amount, regulating number of store files
+ conf.setInt("hbase.hregion.memstore.flush.size", 25000);
+ // so make sure we get a compaction when doing a load, but keep around some
+ // files in the store
+ conf.setInt("hbase.hstore.compaction.min", 10);
+ conf.setInt("hbase.hstore.compactionThreshold", 10);
+ // block writes if we get to 12 store files
+ conf.setInt("hbase.hstore.blockingStoreFiles", 12);
+ // Enable snapshot
+ conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
+ conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
+ ConstantSizeRegionSplitPolicy.class.getName());
+ conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
+ }
+
+ @Before
+ public void setup() throws Exception {
+ MobSnapshotTestingUtils.createMobTable(UTIL, TABLE_NAME, 1, TEST_FAM);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ UTIL.deleteTable(TABLE_NAME);
+
+ SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
+ SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ /**
+ * Test simple flush snapshotting a table that is online
+ * @throws Exception
+ */
+ @Test (timeout=300000)
+ public void testFlushTableSnapshot() throws Exception {
+ Admin admin = UTIL.getHBaseAdmin();
+ // make sure we don't fail on listing snapshots
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+
+ // put some stuff in the table
- HTable table = new HTable(UTIL.getConfiguration(), TABLE_NAME);
++ Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME);
+ SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
+
+ LOG.debug("FS state before snapshot:");
+ FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+ FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+
+ // take a snapshot of the enabled table
+ String snapshotString = "offlineTableSnapshot";
+ byte[] snapshot = Bytes.toBytes(snapshotString);
+ admin.snapshot(snapshotString, TABLE_NAME, SnapshotDescription.Type.FLUSH);
+ LOG.debug("Snapshot completed.");
+
+ // make sure we have the snapshot
+ List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
+ snapshot, TABLE_NAME);
+
+ // make sure its a valid snapshot
+ FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+ Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+ LOG.debug("FS state after snapshot:");
+ FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+ FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+
+ SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
+ admin, fs);
+ }
+
+ /**
+ * Test snapshotting a table that is online without flushing
+ * @throws Exception
+ */
+ @Test(timeout=30000)
+ public void testSkipFlushTableSnapshot() throws Exception {
+ Admin admin = UTIL.getHBaseAdmin();
+ // make sure we don't fail on listing snapshots
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+
+ // put some stuff in the table
- HTable table = new HTable(UTIL.getConfiguration(), TABLE_NAME);
++ Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME);
+ UTIL.loadTable(table, TEST_FAM);
+
+ LOG.debug("FS state before snapshot:");
+ FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+ FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+
+ // take a snapshot of the enabled table
+ String snapshotString = "skipFlushTableSnapshot";
+ byte[] snapshot = Bytes.toBytes(snapshotString);
+ admin.snapshot(snapshotString, TABLE_NAME, SnapshotDescription.Type.SKIPFLUSH);
+ LOG.debug("Snapshot completed.");
+
+ // make sure we have the snapshot
+ List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
+ snapshot, TABLE_NAME);
+
+ // make sure its a valid snapshot
+ FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+ Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+ LOG.debug("FS state after snapshot:");
+ FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+ FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+
+ SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
+ admin, fs);
+
+ admin.deleteSnapshot(snapshot);
+ snapshots = admin.listSnapshots();
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+ }
+
+
+ /**
+ * Test simple flush snapshotting a table that is online
+ * @throws Exception
+ */
+ @Test (timeout=300000)
+ public void testFlushTableSnapshotWithProcedure() throws Exception {
+ Admin admin = UTIL.getHBaseAdmin();
+ // make sure we don't fail on listing snapshots
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+
+ // put some stuff in the table
- HTable table = new HTable(UTIL.getConfiguration(), TABLE_NAME);
++ Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME);
+ SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
+
+ LOG.debug("FS state before snapshot:");
+ FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+ FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+
+ // take a snapshot of the enabled table
+ String snapshotString = "offlineTableSnapshot";
+ byte[] snapshot = Bytes.toBytes(snapshotString);
+ Map<String, String> props = new HashMap<String, String>();
+ props.put("table", TABLE_NAME.getNameAsString());
+ admin.execProcedure(SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION,
+ snapshotString, props);
+
+
+ LOG.debug("Snapshot completed.");
+
+ // make sure we have the snapshot
+ List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
+ snapshot, TABLE_NAME);
+
+ // make sure its a valid snapshot
+ FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+ Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+ LOG.debug("FS state after snapshot:");
+ FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+ FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+
+ SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
+ admin, fs);
+ }
+
+ @Test (timeout=300000)
+ public void testSnapshotFailsOnNonExistantTable() throws Exception {
+ Admin admin = UTIL.getHBaseAdmin();
+ // make sure we don't fail on listing snapshots
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+ TableName tableName = TableName.valueOf("_not_a_table");
+
+ // make sure the table doesn't exist
+ boolean fail = false;
+ do {
+ try {
+ admin.getTableDescriptor(tableName);
+ fail = true;
+ LOG.error("Table:" + tableName + " already exists, checking a new name");
+ tableName = TableName.valueOf(tableName+"!");
+ } catch (TableNotFoundException e) {
+ fail = false;
+ }
+ } while (fail);
+
+    // snapshot the non-existent table
+ try {
+ admin.snapshot("fail", tableName, SnapshotDescription.Type.FLUSH);
+ fail("Snapshot succeeded even though there is not table.");
+ } catch (SnapshotCreationException e) {
+ LOG.info("Correctly failed to snapshot a non-existant table:" + e.getMessage());
+ }
+ }
+
+ @Test(timeout = 300000)
+ public void testAsyncFlushSnapshot() throws Exception {
+ Admin admin = UTIL.getHBaseAdmin();
+ SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("asyncSnapshot")
+ .setTable(TABLE_NAME.getNameAsString())
+ .setType(SnapshotDescription.Type.FLUSH)
+ .build();
+
+ // take the snapshot async
+ admin.takeSnapshotAsync(snapshot);
+
+ // constantly loop, looking for the snapshot to complete
+ HMaster master = UTIL.getMiniHBaseCluster().getMaster();
+ SnapshotTestingUtils.waitForSnapshotToComplete(master, snapshot, 200);
+ LOG.info(" === Async Snapshot Completed ===");
+ FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+ FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+ // make sure we get the snapshot
+ SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot);
+ }
+
+ @Test (timeout=300000)
+ public void testSnapshotStateAfterMerge() throws Exception {
+ int numRows = DEFAULT_NUM_ROWS;
+ Admin admin = UTIL.getHBaseAdmin();
+ // make sure we don't fail on listing snapshots
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+ // load the table so we have some data
+ SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, numRows, TEST_FAM);
+
+ // Take a snapshot
+ String snapshotBeforeMergeName = "snapshotBeforeMerge";
+ admin.snapshot(snapshotBeforeMergeName, TABLE_NAME, SnapshotDescription.Type.FLUSH);
+
+ // Clone the table
+ TableName cloneBeforeMergeName = TableName.valueOf("cloneBeforeMerge");
+ admin.cloneSnapshot(snapshotBeforeMergeName, cloneBeforeMergeName);
+ SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneBeforeMergeName);
+
+ // Merge two regions
+ List<HRegionInfo> regions = admin.getTableRegions(TABLE_NAME);
+ Collections.sort(regions, new Comparator<HRegionInfo>() {
+ public int compare(HRegionInfo r1, HRegionInfo r2) {
+ return Bytes.compareTo(r1.getStartKey(), r2.getStartKey());
+ }
+ });
+
+ int numRegions = admin.getTableRegions(TABLE_NAME).size();
+ int numRegionsAfterMerge = numRegions - 2;
+ admin.mergeRegions(regions.get(1).getEncodedNameAsBytes(),
+ regions.get(2).getEncodedNameAsBytes(), true);
+ admin.mergeRegions(regions.get(5).getEncodedNameAsBytes(),
+ regions.get(6).getEncodedNameAsBytes(), true);
+
+ // Verify that there's one region less
+ waitRegionsAfterMerge(numRegionsAfterMerge);
+ assertEquals(numRegionsAfterMerge, admin.getTableRegions(TABLE_NAME).size());
+
+ // Clone the table
+ TableName cloneAfterMergeName = TableName.valueOf("cloneAfterMerge");
+ admin.cloneSnapshot(snapshotBeforeMergeName, cloneAfterMergeName);
+ SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneAfterMergeName);
+
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, TABLE_NAME, numRows);
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, cloneBeforeMergeName, numRows);
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, cloneAfterMergeName, numRows);
+
+ // test that we can delete the snapshot
+ UTIL.deleteTable(cloneAfterMergeName);
+ UTIL.deleteTable(cloneBeforeMergeName);
+ }
+
+ @Test (timeout=300000)
+ public void testTakeSnapshotAfterMerge() throws Exception {
+ int numRows = DEFAULT_NUM_ROWS;
+ Admin admin = UTIL.getHBaseAdmin();
+ // make sure we don't fail on listing snapshots
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+ // load the table so we have some data
+ SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, numRows, TEST_FAM);
+
+ // Merge two regions
+ List<HRegionInfo> regions = admin.getTableRegions(TABLE_NAME);
+ Collections.sort(regions, new Comparator<HRegionInfo>() {
+ public int compare(HRegionInfo r1, HRegionInfo r2) {
+ return Bytes.compareTo(r1.getStartKey(), r2.getStartKey());
+ }
+ });
+
+ int numRegions = admin.getTableRegions(TABLE_NAME).size();
+ int numRegionsAfterMerge = numRegions - 2;
+ admin.mergeRegions(regions.get(1).getEncodedNameAsBytes(),
+ regions.get(2).getEncodedNameAsBytes(), true);
+ admin.mergeRegions(regions.get(5).getEncodedNameAsBytes(),
+ regions.get(6).getEncodedNameAsBytes(), true);
+
+ waitRegionsAfterMerge(numRegionsAfterMerge);
+ assertEquals(numRegionsAfterMerge, admin.getTableRegions(TABLE_NAME).size());
+
+ // Take a snapshot
+ String snapshotName = "snapshotAfterMerge";
+ SnapshotTestingUtils.snapshot(admin, snapshotName, TABLE_NAME.getNameAsString(),
+ SnapshotDescription.Type.FLUSH, 3);
+
+ // Clone the table
+ TableName cloneName = TableName.valueOf("cloneMerge");
+ admin.cloneSnapshot(snapshotName, cloneName);
+ SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneName);
+
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, TABLE_NAME, numRows);
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, cloneName, numRows);
+
+ // test that we can delete the snapshot
+ UTIL.deleteTable(cloneName);
+ }
+
+ /**
+ * Basic end-to-end test of simple-flush-based snapshots
+ */
+ @Test (timeout=300000)
+ public void testFlushCreateListDestroy() throws Exception {
+ LOG.debug("------- Starting Snapshot test -------------");
+ Admin admin = UTIL.getHBaseAdmin();
+ // make sure we don't fail on listing snapshots
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+ // load the table so we have some data
+ SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
+
+ String snapshotName = "flushSnapshotCreateListDestroy";
+ FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+ Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+ SnapshotTestingUtils.createSnapshotAndValidate(admin, TABLE_NAME, Bytes.toString(TEST_FAM),
+ snapshotName, rootDir, fs, true);
+ }
+
+ /**
+ * Demonstrate that we reject snapshot requests if there is a snapshot already running on the
+ * same table currently running and that concurrent snapshots on different tables can both
- * succeed concurretly.
++ * succeed concurrently.
+ */
+ @Test(timeout=300000)
+ public void testConcurrentSnapshottingAttempts() throws IOException, InterruptedException {
+ final String STRING_TABLE2_NAME = STRING_TABLE_NAME + "2";
+ final TableName TABLE2_NAME =
+ TableName.valueOf(STRING_TABLE2_NAME);
+
+ int ssNum = 20;
+ Admin admin = UTIL.getHBaseAdmin();
+ // make sure we don't fail on listing snapshots
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+ // create second testing table
+ SnapshotTestingUtils.createTable(UTIL, TABLE2_NAME, TEST_FAM);
+ // load the table so we have some data
+ SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
+ SnapshotTestingUtils.loadData(UTIL, TABLE2_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
+
+ final CountDownLatch toBeSubmitted = new CountDownLatch(ssNum);
+ // We'll have one of these per thread
+ class SSRunnable implements Runnable {
+ SnapshotDescription ss;
+ SSRunnable(SnapshotDescription ss) {
+ this.ss = ss;
+ }
+
+ @Override
+ public void run() {
+ try {
+ Admin admin = UTIL.getHBaseAdmin();
+ LOG.info("Submitting snapshot request: " + ClientSnapshotDescriptionUtils.toString(ss));
+ admin.takeSnapshotAsync(ss);
+ } catch (Exception e) {
+ LOG.info("Exception during snapshot request: " + ClientSnapshotDescriptionUtils.toString(
+ ss)
+ + ". This is ok, we expect some", e);
+ }
+ LOG.info("Submitted snapshot request: " + ClientSnapshotDescriptionUtils.toString(ss));
+ toBeSubmitted.countDown();
+ }
+ };
+
+ // build descriptions
+ SnapshotDescription[] descs = new SnapshotDescription[ssNum];
+ for (int i = 0; i < ssNum; i++) {
+ SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
+ builder.setTable(((i % 2) == 0 ? TABLE_NAME : TABLE2_NAME).getNameAsString());
+ builder.setName("ss"+i);
+ builder.setType(SnapshotDescription.Type.FLUSH);
+ descs[i] = builder.build();
+ }
+
+ // kick each off its own thread
+ for (int i=0 ; i < ssNum; i++) {
+ new Thread(new SSRunnable(descs[i])).start();
+ }
+
+ // wait until all have been submitted
+ toBeSubmitted.await();
+
+ // loop until all are done.
+ while (true) {
+ int doneCount = 0;
+ for (SnapshotDescription ss : descs) {
+ try {
+ if (admin.isSnapshotFinished(ss)) {
+ doneCount++;
+ }
+ } catch (Exception e) {
+ LOG.warn("Got an exception when checking for snapshot " + ss.getName(), e);
+ doneCount++;
+ }
+ }
+ if (doneCount == descs.length) {
+ break;
+ }
+ Thread.sleep(100);
+ }
+
+ // dump for debugging
+ logFSTree(FSUtils.getRootDir(UTIL.getConfiguration()));
+
+ List<SnapshotDescription> taken = admin.listSnapshots();
+ int takenSize = taken.size();
+ LOG.info("Taken " + takenSize + " snapshots: " + taken);
+ assertTrue("We expect at least 1 request to be rejected because of we concurrently" +
+ " issued many requests", takenSize < ssNum && takenSize > 0);
+
+ // Verify that there's at least one snapshot per table
+ int t1SnapshotsCount = 0;
+ int t2SnapshotsCount = 0;
+ for (SnapshotDescription ss : taken) {
+ if (TableName.valueOf(ss.getTable()).equals(TABLE_NAME)) {
+ t1SnapshotsCount++;
+ } else if (TableName.valueOf(ss.getTable()).equals(TABLE2_NAME)) {
+ t2SnapshotsCount++;
+ }
+ }
+ assertTrue("We expect at least 1 snapshot of table1 ", t1SnapshotsCount > 0);
+ assertTrue("We expect at least 1 snapshot of table2 ", t2SnapshotsCount > 0);
+
+ UTIL.deleteTable(TABLE2_NAME);
+ }
+
+ private void logFSTree(Path root) throws IOException {
+ FSUtils.logFileSystemState(UTIL.getDFSCluster().getFileSystem(), root, LOG);
+ }
+
+ private void waitRegionsAfterMerge(final long numRegionsAfterMerge)
+ throws IOException, InterruptedException {
+ Admin admin = UTIL.getHBaseAdmin();
+ // Verify that there's one region less
+ long startTime = System.currentTimeMillis();
+ while (admin.getTableRegions(TABLE_NAME).size() != numRegionsAfterMerge) {
+ // This may be flaky... if after 15sec the merge is not complete give up
+ // it will fail in the assertEquals(numRegionsAfterMerge).
+ if ((System.currentTimeMillis() - startTime) > 15000)
+ break;
+ Thread.sleep(100);
+ }
+ SnapshotTestingUtils.waitForTableToBeOnline(UTIL, TABLE_NAME);
+ }
+}
+
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreFlushSnapshotFromClient.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreFlushSnapshotFromClient.java
index d281763,0000000..cb58b17
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreFlushSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreFlushSnapshotFromClient.java
@@@ -1,210 -1,0 +1,211 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
- import org.apache.hadoop.hbase.client.HTable;
++import org.apache.hadoop.hbase.client.ConnectionFactory;
++import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test clone/restore snapshots from the client
+ *
+ * TODO This is essentially a clone of TestRestoreSnapshotFromClient. It is worth refactoring
+ * this because there will be a few more flavors of snapshots that need to run these tests.
+ */
+@Category({ClientTests.class,LargeTests.class})
+public class TestMobRestoreFlushSnapshotFromClient {
+ final Log LOG = LogFactory.getLog(getClass());
+
+ private final static HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private final byte[] FAMILY = Bytes.toBytes("cf");
+
+ private byte[] snapshotName0;
+ private byte[] snapshotName1;
+ private byte[] snapshotName2;
+ private int snapshot0Rows;
+ private int snapshot1Rows;
+ private TableName tableName;
+ private Admin admin;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
+ UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
+ UTIL.getConfiguration().setInt("hbase.client.pause", 250);
+ UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
+ UTIL.getConfiguration().setBoolean(
+ "hbase.master.enabletable.roundrobin", true);
+
+ // Enable snapshot
+ UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
+ UTIL.getConfiguration().setLong(RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_KEY,
+ RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_DEFAULT * 2);
+
+ UTIL.getConfiguration().setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
+
+ UTIL.startMiniCluster(3);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ UTIL.shutdownMiniCluster();
+ }
+
+ /**
+ * Initialize the tests with a table filled with some data
+ * and two snapshots (snapshotName0, snapshotName1) of different states.
+ * The tableName, snapshotNames and the number of rows in the snapshot are initialized.
+ */
+ @Before
+ public void setup() throws Exception {
+ this.admin = UTIL.getHBaseAdmin();
+
+ long tid = System.currentTimeMillis();
+ tableName = TableName.valueOf("testtb-" + tid);
+ snapshotName0 = Bytes.toBytes("snaptb0-" + tid);
+ snapshotName1 = Bytes.toBytes("snaptb1-" + tid);
+ snapshotName2 = Bytes.toBytes("snaptb2-" + tid);
+
+ // create Table
+ MobSnapshotTestingUtils.createMobTable(UTIL, tableName, 1, FAMILY);
+
- HTable table = new HTable(UTIL.getConfiguration(), tableName);
++ Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(tableName);
+ SnapshotTestingUtils.loadData(UTIL, tableName, 500, FAMILY);
+ snapshot0Rows = MobSnapshotTestingUtils.countMobRows(table);
+ LOG.info("=== before snapshot with 500 rows");
+ logFSTree();
+
+ // take a snapshot
+ admin.snapshot(Bytes.toString(snapshotName0), tableName,
+ SnapshotDescription.Type.FLUSH);
+
+ LOG.info("=== after snapshot with 500 rows");
+ logFSTree();
+
+ // insert more data
+ SnapshotTestingUtils.loadData(UTIL, tableName, 500, FAMILY);
+ snapshot1Rows = MobSnapshotTestingUtils.countMobRows(table);
+ LOG.info("=== before snapshot with 1000 rows");
+ logFSTree();
+
+ // take a snapshot of the updated table
+ admin.snapshot(Bytes.toString(snapshotName1), tableName,
+ SnapshotDescription.Type.FLUSH);
+ LOG.info("=== after snapshot with 1000 rows");
+ logFSTree();
+ table.close();
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
+ SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
+ }
+
+ @Test
+ public void testTakeFlushSnapshot() throws IOException {
+ // taking happens in setup.
+ }
+
+ @Test
+ public void testRestoreSnapshot() throws IOException {
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, tableName, snapshot1Rows);
+
+ // Restore from snapshot-0
+ admin.disableTable(tableName);
+ admin.restoreSnapshot(snapshotName0);
+ logFSTree();
+ admin.enableTable(tableName);
+ LOG.info("=== after restore with 500 row snapshot");
+ logFSTree();
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, tableName, snapshot0Rows);
+
+ // Restore from snapshot-1
+ admin.disableTable(tableName);
+ admin.restoreSnapshot(snapshotName1);
+ admin.enableTable(tableName);
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, tableName, snapshot1Rows);
+ }
+
+ @Test(expected=SnapshotDoesNotExistException.class)
+ public void testCloneNonExistentSnapshot() throws IOException, InterruptedException {
+ String snapshotName = "random-snapshot-" + System.currentTimeMillis();
+ TableName tableName = TableName.valueOf("random-table-" + System.currentTimeMillis());
+ admin.cloneSnapshot(snapshotName, tableName);
+ }
+
+ @Test
+ public void testCloneSnapshot() throws IOException, InterruptedException {
+ TableName clonedTableName = TableName.valueOf("clonedtb-" + System.currentTimeMillis());
+ testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows);
+ testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows);
+ }
+
+ private void testCloneSnapshot(final TableName tableName, final byte[] snapshotName,
+ int snapshotRows) throws IOException, InterruptedException {
+ // create a new table from snapshot
+ admin.cloneSnapshot(snapshotName, tableName);
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, tableName, snapshotRows);
+
+ UTIL.deleteTable(tableName);
+ }
+
+ @Test
+ public void testRestoreSnapshotOfCloned() throws IOException, InterruptedException {
+ TableName clonedTableName = TableName.valueOf("clonedtb-" + System.currentTimeMillis());
+ admin.cloneSnapshot(snapshotName0, clonedTableName);
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, clonedTableName, snapshot0Rows);
+ admin.snapshot(Bytes.toString(snapshotName2), clonedTableName, SnapshotDescription.Type.FLUSH);
+ UTIL.deleteTable(clonedTableName);
+
+ admin.cloneSnapshot(snapshotName2, clonedTableName);
+ MobSnapshotTestingUtils.verifyMobRowCount(UTIL, clonedTableName, snapshot0Rows);
+ UTIL.deleteTable(clonedTableName);
+ }
+
+ // ==========================================================================
+ // Helpers
+ // ==========================================================================
+ private void logFSTree() throws IOException {
+ MasterFileSystem mfs = UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
+ FSUtils.logFileSystemState(mfs.getFileSystem(), mfs.getRootDir(), LOG);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java
index 1893c7a,0000000..70b4312
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java
@@@ -1,163 -1,0 +1,159 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
- import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils.SnapshotMock;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+/**
+ * Test the restore/clone operation from a file-system point of view.
+ */
+@Category(SmallTests.class)
+public class TestMobRestoreSnapshotHelper {
+ final Log LOG = LogFactory.getLog(getClass());
+
+ private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
- private final static String TEST_HFILE = "abc";
+
+ private Configuration conf;
- private Path archiveDir;
+ private FileSystem fs;
+ private Path rootDir;
+
+ @Before
+ public void setup() throws Exception {
+ rootDir = TEST_UTIL.getDataTestDir("testRestore");
- archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
+ fs = TEST_UTIL.getTestFileSystem();
+ TEST_UTIL.getConfiguration().setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
+ conf = TEST_UTIL.getConfiguration();
+ FSUtils.setRootDir(conf, rootDir);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ fs.delete(TEST_UTIL.getDataTestDir(), true);
+ }
+
+ @Test
+ public void testRestore() throws IOException {
+ // Test a rolling-upgrade-like snapshot:
+ // half of the machines write using the v1 format and the others use the v2 format.
+ SnapshotMock snapshotMock = new SnapshotMock(TEST_UTIL.getConfiguration(), fs, rootDir);
+ SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2("snapshot");
+ builder.addRegionV1();
+ builder.addRegionV2();
+ builder.addRegionV2();
+ builder.addRegionV1();
+ Path snapshotDir = builder.commit();
+ HTableDescriptor htd = builder.getTableDescriptor();
+ SnapshotDescription desc = builder.getSnapshotDescription();
+
+ // Test clone a snapshot
+ HTableDescriptor htdClone = snapshotMock.createHtd("testtb-clone");
+ testRestore(snapshotDir, desc, htdClone);
+ verifyRestore(rootDir, htd, htdClone);
+
+ // Test clone a clone ("link to link")
+ SnapshotDescription cloneDesc = SnapshotDescription.newBuilder()
+ .setName("cloneSnapshot")
+ .setTable("testtb-clone")
+ .build();
+ Path cloneDir = FSUtils.getTableDir(rootDir, htdClone.getTableName());
+ HTableDescriptor htdClone2 = snapshotMock.createHtd("testtb-clone2");
+ testRestore(cloneDir, cloneDesc, htdClone2);
+ verifyRestore(rootDir, htd, htdClone2);
+ }
+
+ private void verifyRestore(final Path rootDir, final HTableDescriptor sourceHtd,
+ final HTableDescriptor htdClone) throws IOException {
+ String[] files = SnapshotTestingUtils.listHFileNames(fs,
+ FSUtils.getTableDir(rootDir, htdClone.getTableName()));
+ assertEquals(12, files.length);
+ for (int i = 0; i < files.length; i += 2) {
+ String linkFile = files[i];
+ String refFile = files[i+1];
+ assertTrue(linkFile + " should be a HFileLink", HFileLink.isHFileLink(linkFile));
+ assertTrue(refFile + " should be a Referene", StoreFileInfo.isReference(refFile));
+ assertEquals(sourceHtd.getTableName(), HFileLink.getReferencedTableName(linkFile));
+ Path refPath = getReferredToFile(refFile);
+ LOG.debug("get reference name for file " + refFile + " = " + refPath);
+ assertTrue(refPath.getName() + " should be a HFileLink", HFileLink.isHFileLink(refPath.getName()));
+ assertEquals(linkFile, refPath.getName());
+ }
+ }
+
+ /**
+ * Execute the restore operation
+ * @param snapshotDir The snapshot directory to use as "restore source"
+ * @param sd The snapshot descriptor
+ * @param htdClone The HTableDescriptor of the table to restore/clone.
+ */
+ public void testRestore(final Path snapshotDir, final SnapshotDescription sd,
+ final HTableDescriptor htdClone) throws IOException {
+ LOG.debug("pre-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir);
+ FSUtils.logFileSystemState(fs, rootDir, LOG);
+
+ new FSTableDescriptors(conf).createTableDescriptor(htdClone);
+ RestoreSnapshotHelper helper = getRestoreHelper(rootDir, snapshotDir, sd, htdClone);
+ helper.restoreHdfsRegions();
+
+ LOG.debug("post-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir);
+ FSUtils.logFileSystemState(fs, rootDir, LOG);
+ }
+
+ /**
+ * Initialize the restore helper, based on the snapshot and table information provided.
+ */
+ private RestoreSnapshotHelper getRestoreHelper(final Path rootDir, final Path snapshotDir,
+ final SnapshotDescription sd, final HTableDescriptor htdClone) throws IOException {
+ ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
+ MonitoredTask status = Mockito.mock(MonitoredTask.class);
+
+ SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, sd);
+ return new RestoreSnapshotHelper(conf, fs, manifest,
+ htdClone, rootDir, monitor, status);
+ }
+
+ private Path getReferredToFile(final String referenceName) {
+ Path fakeBasePath = new Path(new Path("table", "region"), "cf");
+ return StoreFileInfo.getReferredToFile(new Path(fakeBasePath, referenceName));
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithMOB.java
----------------------------------------------------------------------
diff --cc hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithMOB.java
index 6ce4252,0000000..006316a
mode 100644,000000..100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithMOB.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithMOB.java
@@@ -1,73 -1,0 +1,73 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import java.util.Arrays;
+
+import org.apache.hadoop.hbase.util.test.LoadTestKVGenerator;
+
+/**
+ * A load test data generator for MOB
+ */
+public class LoadTestDataGeneratorWithMOB
+ extends MultiThreadedAction.DefaultDataGenerator {
+
+ private byte[] mobColumnFamily;
+ private LoadTestKVGenerator mobKvGenerator;
+
+ public LoadTestDataGeneratorWithMOB(int minValueSize, int maxValueSize,
+ int minColumnsPerKey, int maxColumnsPerKey, byte[]... columnFamilies) {
+ super(minValueSize, maxValueSize, minColumnsPerKey, maxColumnsPerKey,
+ columnFamilies);
+ }
+
+ public LoadTestDataGeneratorWithMOB(byte[]... columnFamilies) {
+ super(columnFamilies);
+ }
+
+ @Override
+ public void initialize(String[] args) {
+ super.initialize(args);
+ if (args.length != 3) {
+ throw new IllegalArgumentException(
+ "LoadTestDataGeneratorWithMOB can have 3 arguments."
- + "1st arguement is a column family, the 2nd argument "
++ + "1st argument is a column family, the 2nd argument "
+ + "is the minimum mob data size and the 3rd argument "
+ + "is the maximum mob data size.");
+ }
+ String mobColumnFamily = args[0];
+ int minMobDataSize = Integer.parseInt(args[1]);
+ int maxMobDataSize = Integer.parseInt(args[2]);
+ configureMob(Bytes.toBytes(mobColumnFamily), minMobDataSize, maxMobDataSize);
+ }
+
+ private void configureMob(byte[] mobColumnFamily, int minMobDataSize,
+ int maxMobDataSize) {
+ this.mobColumnFamily = mobColumnFamily;
+ mobKvGenerator = new LoadTestKVGenerator(minMobDataSize, maxMobDataSize);
+ }
+
+ @Override
+ public byte[] generateValue(byte[] rowKey, byte[] cf,
+ byte[] column) {
+ if(Arrays.equals(cf, mobColumnFamily))
+ return mobKvGenerator.generateRandomSizeValue(rowKey, cf, column);
+
+ return super.generateValue(rowKey, cf, column);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-shell/src/main/ruby/hbase/admin.rb
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/hbase-shell/src/main/ruby/shell.rb
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/pom.xml
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e20bbf6/src/main/asciidoc/book.adoc
----------------------------------------------------------------------