Posted to commits@hbase.apache.org by el...@apache.org on 2017/08/23 16:47:25 UTC
[28/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
new file mode 100644
index 0000000..7011ed3
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
@@ -0,0 +1,336 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.apache.hadoop.hbase.backup.util.BackupUtils.succeeded;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupMergeJob;
+import org.apache.hadoop.hbase.backup.mapreduce.MapReduceHFileSplitterJob;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Pair;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
+ private static final Log LOG = LogFactory.getLog(TestIncrementalBackupMergeWithFailures.class);
+
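+ // Failure injection points inside run(): PHASE1 fires after backup info is read,
+ // PHASE2 before the MapReduce player runs for each table, PHASE3 before the
+ // processed-tables list is recorded, and PHASE4 before old images are deleted
+ // and the manifest is updated.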
+ static enum FailurePhase {
+ PHASE1, PHASE2, PHASE3, PHASE4
+ }
+ public final static String FAILURE_PHASE_KEY = "failurePhase";
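+ // The phase to fail at is passed via configuration,
+ // e.g. conf.set(FAILURE_PHASE_KEY, FailurePhase.PHASE2.toString());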
+
+ static class BackupMergeJobWithFailures extends MapReduceBackupMergeJob {
+
+ FailurePhase failurePhase;
+
+ @Override
+ public void setConf(Configuration conf) {
+ super.setConf(conf);
+ String val = conf.get(FAILURE_PHASE_KEY);
+ if (val != null) {
+ failurePhase = FailurePhase.valueOf(val);
+ } else {
+ Assert.fail("Failure phase is not set");
+ }
+ }
+
+ /**
+ * An exact copy of the parent's run(), with failure injection points
+ * (see FailurePhase) inserted between the merge stages.
+ */
+ @Override
+ public void run(String[] backupIds) throws IOException {
+ String bulkOutputConfKey;
+
+ // TODO: run player on remote cluster
+ player = new MapReduceHFileSplitterJob();
+ bulkOutputConfKey = MapReduceHFileSplitterJob.BULK_OUTPUT_CONF_KEY;
+ // Player reads all files in arbitrary directory structure and creates
+ // a Map task for each file
+ String bids = StringUtils.join(backupIds, ",");
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Merge backup images " + bids);
+ }
+
+ List<Pair<TableName, Path>> processedTableList = new ArrayList<Pair<TableName, Path>>();
+ boolean finishedTables = false;
+ Connection conn = ConnectionFactory.createConnection(getConf());
+ BackupSystemTable table = new BackupSystemTable(conn);
+ FileSystem fs = FileSystem.get(getConf());
+
+ try {
+
+ // Start backup exclusive operation
+ table.startBackupExclusiveOperation();
+ // Start merge operation
+ table.startMergeOperation(backupIds);
+
+ // Select most recent backup id
+ String mergedBackupId = findMostRecentBackupId(backupIds);
+
+ TableName[] tableNames = getTableNamesInBackupImages(backupIds);
+ String backupRoot = null;
+
+ BackupInfo bInfo = table.readBackupInfo(backupIds[0]);
+ backupRoot = bInfo.getBackupRootDir();
+ // PHASE 1
+ checkFailure(FailurePhase.PHASE1);
+
+ for (int i = 0; i < tableNames.length; i++) {
+
+ LOG.info("Merge backup images for " + tableNames[i]);
+
+ // Find input directories for table
+
+ Path[] dirPaths = findInputDirectories(fs, backupRoot, tableNames[i], backupIds);
+ String dirs = StringUtils.join(dirPaths, ",");
+ Path bulkOutputPath =
+ BackupUtils.getBulkOutputDir(BackupUtils.getFileNameCompatibleString(tableNames[i]),
+ getConf(), false);
+ // Delete content if exists
+ if (fs.exists(bulkOutputPath)) {
+ if (!fs.delete(bulkOutputPath, true)) {
+ LOG.warn("Can not delete: " + bulkOutputPath);
+ }
+ }
+ Configuration conf = getConf();
+ conf.set(bulkOutputConfKey, bulkOutputPath.toString());
+ String[] playerArgs = { dirs, tableNames[i].getNameAsString() };
+
+ int result = 0;
+ // PHASE 2
+ checkFailure(FailurePhase.PHASE2);
+ player.setConf(getConf());
+ result = player.run(playerArgs);
+ if (succeeded(result)) {
+ // Add to processed table list
+ processedTableList.add(new Pair<TableName, Path>(tableNames[i], bulkOutputPath));
+ } else {
+ throw new IOException("Can not merge backup images for " + dirs
+ + " (check Hadoop/MR and HBase logs). Player return code =" + result);
+ }
+ LOG.debug("Merge Job finished:" + result);
+ }
+ List<TableName> tableList = toTableNameList(processedTableList);
+ // PHASE 3
+ checkFailure(FailurePhase.PHASE3);
+ table.updateProcessedTablesForMerge(tableList);
+ finishedTables = true;
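+ // From this point on, a failure requires the backup repair tool (see catch block below)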
+
+ // Move data
+ for (Pair<TableName, Path> tn : processedTableList) {
+ moveData(fs, backupRoot, tn.getSecond(), tn.getFirst(), mergedBackupId);
+ }
+ // PHASE 4
+ checkFailure(FailurePhase.PHASE4);
+ // Delete old data and update manifest
+ List<String> backupsToDelete = getBackupIdsToDelete(backupIds, mergedBackupId);
+ deleteBackupImages(backupsToDelete, conn, fs, backupRoot);
+ updateBackupManifest(backupRoot, mergedBackupId, backupsToDelete);
+ // Finish merge session
+ table.finishMergeOperation();
+ } catch (RuntimeException e) {
+ throw e;
+ } catch (Exception e) {
+ LOG.error(e);
+ if (!finishedTables) {
+ // cleanup bulk directories and finish merge
+ // merge MUST be repeated (no need for repair)
+ cleanupBulkLoadDirs(fs, toPathList(processedTableList));
+ table.finishMergeOperation();
+ table.finishBackupExclusiveOperation();
+ throw new IOException("Backup merge operation failed, you should try it again", e);
+ } else {
+ // backup repair must be run
+ throw new IOException(
+ "Backup merge operation failed, run backup repair tool to restore system's integrity",
+ e);
+ }
+ } finally {
+ table.close();
+ conn.close();
+ }
+
+ }
+
+ private void checkFailure(FailurePhase phase) throws IOException {
+ if (failurePhase == phase) {
+ throw new IOException(phase.toString());
+ }
+ }
+
+ }
+
+ @Test
+ public void testIncBackupMergeRestore() throws Exception {
+
+ int ADD_ROWS = 99;
+ // #1 - create full backup for all tables
+ LOG.info("create full backup image for all tables");
+
+ List<TableName> tables = Lists.newArrayList(table1, table2);
+ // Set custom Merge Job implementation
+ conf1.setClass(BackupRestoreFactory.HBASE_BACKUP_MERGE_IMPL_CLASS,
+ BackupMergeJobWithFailures.class, BackupMergeJob.class);
+
+ Connection conn = ConnectionFactory.createConnection(conf1);
+
+ HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
+ BackupAdminImpl client = new BackupAdminImpl(conn);
+
+ BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+ String backupIdFull = client.backupTables(request);
+
+ assertTrue(checkSucceeded(backupIdFull));
+
+ // #2 - insert some data to table1
+ HTable t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
+ LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
+
+ Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS);
+ t1.close();
+ LOG.debug("written " + ADD_ROWS + " rows to " + table1);
+
+ HTable t2 = insertIntoTable(conn, table2, famName, 1, ADD_ROWS);
+
+ Assert.assertEquals(TEST_UTIL.countRows(t2), NB_ROWS_IN_BATCH + ADD_ROWS);
+ t2.close();
+ LOG.debug("written " + ADD_ROWS + " rows to " + table2);
+
+ // #3 - incremental backup for multiple tables
+ tables = Lists.newArrayList(table1, table2);
+ request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+ String backupIdIncMultiple = client.backupTables(request);
+
+ assertTrue(checkSucceeded(backupIdIncMultiple));
+
+ t1 = insertIntoTable(conn, table1, famName, 2, ADD_ROWS);
+ t1.close();
+
+ t2 = insertIntoTable(conn, table2, famName, 2, ADD_ROWS);
+ t2.close();
+
+ // #4 - second incremental backup for multiple tables
+ request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+ String backupIdIncMultiple2 = client.backupTables(request);
+ assertTrue(checkSucceeded(backupIdIncMultiple2));
+
+ // #5 - merge backup images with failures
+
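+ // Each iteration injects a failure at one phase, expects mergeBackups() to throw,
+ // and then verifies whether the backup system table needs repair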
+ for (FailurePhase phase : FailurePhase.values()) {
+ Configuration conf = conn.getConfiguration();
+
+ conf.set(FAILURE_PHASE_KEY, phase.toString());
+
+ try (BackupAdmin bAdmin = new BackupAdminImpl(conn)) {
+ String[] backups = new String[] { backupIdIncMultiple, backupIdIncMultiple2 };
+ bAdmin.mergeBackups(backups);
+ Assert.fail("Expected IOException");
+ } catch (IOException e) {
+ BackupSystemTable table = new BackupSystemTable(conn);
+ if (phase.ordinal() < FailurePhase.PHASE4.ordinal()) {
+ // No repair needed: the merge and backup exclusive operations
+ // were already finished during cleanup
+ assertFalse(table.isMergeInProgress());
+ try {
+ table.finishBackupExclusiveOperation();
+ Assert.fail("IOException is expected");
+ } catch (IOException ee) {
+ // Expected
+ }
+ } else {
+ // Repair is required
+ assertTrue(table.isMergeInProgress());
+ try {
+ table.startBackupExclusiveOperation();
+ Assert.fail("IOException is expected");
+ } catch (IOException ee) {
+ // Expected - clean up before proceeding
+ table.finishMergeOperation();
+ table.finishBackupExclusiveOperation();
+ }
+ }
+ table.close();
+ LOG.debug("Expected :"+ e.getMessage());
+ }
+ }
+
+ // Now merge w/o failures
+ Configuration conf = conn.getConfiguration();
+ conf.unset(FAILURE_PHASE_KEY);
+ conf.unset(BackupRestoreFactory.HBASE_BACKUP_MERGE_IMPL_CLASS);
+
+ try (BackupAdmin bAdmin = new BackupAdminImpl(conn)) {
+ String[] backups = new String[] { backupIdIncMultiple, backupIdIncMultiple2 };
+ bAdmin.mergeBackups(backups);
+ }
+
+ // #6 - restore incremental backup for multiple tables, with overwrite
+ TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
+ TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
+ client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false,
+ tablesRestoreIncMultiple, tablesMapIncMultiple, true));
+
+ Table hTable = conn.getTable(table1_restore);
+ LOG.debug("After incremental restore: " + hTable.getTableDescriptor());
+ LOG.debug("f1 has " + TEST_UTIL.countRows(hTable, famName) + " rows");
+ Assert.assertEquals(TEST_UTIL.countRows(hTable, famName), NB_ROWS_IN_BATCH + 2 * ADD_ROWS);
+
+ hTable.close();
+
+ hTable = conn.getTable(table2_restore);
+ Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH + 2 * ADD_ROWS);
+ hTable.close();
+
+ admin.close();
+ conn.close();
+
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
new file mode 100644
index 0000000..769785f
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.mapreduce.TestLoadIncrementalHFiles;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+
+/**
+ * 1. Create table t1
+ * 2. Load data to t1
+ * 3. Full backup t1
+ * 4. Load data to t1
+ * 5. Bulk load into t1
+ * 6. Incremental backup t1
+ */
+@Category(LargeTests.class)
+@RunWith(Parameterized.class)
+public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
+ private static final Log LOG = LogFactory.getLog(TestIncrementalBackupWithBulkLoad.class);
+
+ @Parameterized.Parameters
+ public static Collection<Object[]> data() {
+ secure = true;
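+ // Run with the secure test cluster setup enabled (see TestBackupBase)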
+ List<Object[]> params = new ArrayList<Object[]>();
+ params.add(new Object[] {Boolean.TRUE});
+ return params;
+ }
+
+ public TestIncrementalBackupWithBulkLoad(Boolean b) {
+ }
+ // Implement all test cases in one test, since incremental backup/restore has dependencies
+ @Test
+ public void testIncBackupWithBulkLoad() throws Exception {
+ String testName = "TestIncBackupWithBulkLoad";
+ // #1 - create full backup for all tables
+ LOG.info("create full backup image for all tables");
+
+ List<TableName> tables = Lists.newArrayList(table1);
+ Connection conn = ConnectionFactory.createConnection(conf1);
+ HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
+ BackupAdminImpl client = new BackupAdminImpl(conn);
+
+ BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+ String backupIdFull = client.backupTables(request);
+
+ assertTrue(checkSucceeded(backupIdFull));
+
+ // #2 - insert some data to table1
+ HTable t1 = (HTable) conn.getTable(table1);
+ Put p1;
+ for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+ p1 = new Put(Bytes.toBytes("row-t1" + i));
+ p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+ t1.put(p1);
+ }
+
+ Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH * 2);
+ t1.close();
+
+ int NB_ROWS2 = 20;
+ LOG.debug("bulk loading into " + testName);
+ int actual = TestLoadIncrementalHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName,
+ qualName, false, null, new byte[][][] {
+ new byte[][]{ Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") },
+ new byte[][]{ Bytes.toBytes("ddd"), Bytes.toBytes("ooo") },
+ }, true, false, true, NB_ROWS_IN_BATCH * 2, NB_ROWS2);
+
+ // #3 - incremental backup for table1
+ tables = Lists.newArrayList(table1);
+ request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+ String backupIdIncMultiple = client.backupTables(request);
+ assertTrue(checkSucceeded(backupIdIncMultiple));
+
+ // #5.1 - check tables for full restore
+ HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
+
+ // #6 - restore incremental backup for table1
+ TableName[] tablesRestoreIncMultiple = new TableName[] { table1 };
+ TableName[] tablesMapIncMultiple = new TableName[] { table1_restore };
+ client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple,
+ false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
+
+ HTable hTable = (HTable) conn.getTable(table1_restore);
+ Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH * 2 + actual);
+ request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+
+ backupIdFull = client.backupTables(request);
+ try (final BackupSystemTable table = new BackupSystemTable(conn)) {
+ Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>, List<byte[]>> pair
+ = table.readBulkloadRows(tables);
+ assertTrue("map still has " + pair.getSecond().size() + " entries",
+ pair.getSecond().isEmpty());
+ }
+ assertTrue(checkSucceeded(backupIdFull));
+
+ hTable.close();
+ admin.close();
+ conn.close();
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java
new file mode 100644
index 0000000..84a596e
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.backup.impl.TableBackupClient;
+import org.apache.hadoop.hbase.backup.impl.TableBackupClient.Stage;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+@RunWith(Parameterized.class)
+public class TestIncrementalBackupWithFailures extends TestBackupBase {
+ private static final Log LOG = LogFactory.getLog(TestIncrementalBackupWithFailures.class);
+
+ @Parameterized.Parameters
+ public static Collection<Object[]> data() {
+ provider = "multiwal";
+ List<Object[]> params = new ArrayList<Object[]>();
+ params.add(new Object[] { Boolean.TRUE });
+ return params;
+ }
+
+ public TestIncrementalBackupWithFailures(Boolean b) {
+ }
+
+ // Implement all test cases in one test, since incremental backup/restore has dependencies
+ @Test
+ public void testIncBackupRestore() throws Exception {
+
+ int ADD_ROWS = 99;
+ // #1 - create full backup for all tables
+ LOG.info("create full backup image for all tables");
+
+ List<TableName> tables = Lists.newArrayList(table1, table2);
+ final byte[] fam3Name = Bytes.toBytes("f3");
+ table1Desc.addFamily(new HColumnDescriptor(fam3Name));
+ HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
+
+ Connection conn = ConnectionFactory.createConnection(conf1);
+ int NB_ROWS_FAM3 = 6;
+ insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
+
+ HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
+ BackupAdminImpl client = new BackupAdminImpl(conn);
+
+ BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+ String backupIdFull = client.backupTables(request);
+
+ assertTrue(checkSucceeded(backupIdFull));
+
+ // #2 - insert some data to the tables
+ HTable t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
+ LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
+
+ Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3);
+ t1.close();
+ LOG.debug("written " + ADD_ROWS + " rows to " + table1);
+
+ HTable t2 = (HTable) conn.getTable(table2);
+ Put p2;
+ for (int i = 0; i < 5; i++) {
+ p2 = new Put(Bytes.toBytes("row-t2" + i));
+ p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+ t2.put(p2);
+ }
+
+ Assert.assertEquals(TEST_UTIL.countRows(t2), NB_ROWS_IN_BATCH + 5);
+ t2.close();
+ LOG.debug("written " + 5 + " rows to " + table2);
+
+ // #3 - incremental backup for multiple tables
+ incrementalBackupWithFailures();
+
+ admin.close();
+ conn.close();
+
+ }
+
+
+ private void incrementalBackupWithFailures() throws Exception {
+ conf1.set(TableBackupClient.BACKUP_CLIENT_IMPL_CLASS,
+ IncrementalTableBackupClientForTest.class.getName());
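+ // The test client aborts the backup at the stage set via BACKUP_TEST_MODE_STAGE below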
+ int maxStage = Stage.values().length - 1;
+ // Fail stages between 0 and 4 inclusive
+ for (int stage = 0; stage <= maxStage; stage++) {
+ LOG.info("Running stage " + stage);
+ runBackupAndFailAtStage(stage);
+ }
+ }
+
+ private void runBackupAndFailAtStage(int stage) throws Exception {
+
+ conf1.setInt(FullTableBackupClientForTest.BACKUP_TEST_MODE_STAGE, stage);
+ try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
+ int before = table.getBackupHistory().size();
+ String[] args =
+ new String[] { "create", "incremental", BACKUP_ROOT_DIR, "-t",
+ table1.getNameAsString() + "," + table2.getNameAsString() };
+ // Run backup
+ int ret = ToolRunner.run(conf1, new BackupDriver(), args);
+ assertFalse(ret == 0);
+ List<BackupInfo> backups = table.getBackupHistory();
+ int after = table.getBackupHistory().size();
+
+ assertTrue(after == before + 1);
+ for (BackupInfo data : backups) {
+ if (data.getType() == BackupType.FULL) {
+ assertTrue(data.getState() == BackupState.COMPLETE);
+ } else {
+ assertTrue(data.getState() == BackupState.FAILED);
+ }
+ }
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
new file mode 100644
index 0000000..36a9ee2
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.concurrent.CountDownLatch;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestRemoteBackup extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestRemoteBackup.class);
+
+ @Override
+ public void setUp() throws Exception {
+ useSecondCluster = true;
+ super.setUp();
+ }
+
+ /**
+ * Verify that a remote full backup of a single table with data is created correctly.
+ * @throws Exception
+ */
+ @Test
+ public void testFullBackupRemote() throws Exception {
+ LOG.info("test remote full backup on a single table");
+ final CountDownLatch latch = new CountDownLatch(1);
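+ // A writer thread blocks on this latch and inserts rows into family f3
+ // concurrently with the backup; countDown() happens just before the backup starts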
+ final int NB_ROWS_IN_FAM3 = 6;
+ final byte[] fam3Name = Bytes.toBytes("f3");
+ final byte[] fam2Name = Bytes.toBytes("f2");
+ final Connection conn = ConnectionFactory.createConnection(conf1);
+ Thread t = new Thread() {
+ @Override
+ public void run() {
+ try {
+ latch.await();
+ } catch (InterruptedException ie) {
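+ // Ignore the interrupt and proceed to write rows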
+ }
+ try {
+ HTable t1 = (HTable) conn.getTable(table1);
+ Put p1;
+ for (int i = 0; i < NB_ROWS_IN_FAM3; i++) {
+ p1 = new Put(Bytes.toBytes("row-t1" + i));
+ p1.addColumn(fam3Name, qualName, Bytes.toBytes("val" + i));
+ t1.put(p1);
+ }
+ LOG.debug("Wrote " + NB_ROWS_IN_FAM3 + " rows into family3");
+ t1.close();
+ } catch (IOException ioe) {
+ throw new RuntimeException(ioe);
+ }
+ }
+ };
+ t.start();
+
+ table1Desc.addFamily(new HColumnDescriptor(fam3Name));
+ // family 2 is MOB enabled
+ HColumnDescriptor hcd = new HColumnDescriptor(fam2Name);
+ hcd.setMobEnabled(true);
+ hcd.setMobThreshold(0L);
+ table1Desc.addFamily(hcd);
+ HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
+
+ SnapshotTestingUtils.loadData(TEST_UTIL, table1, 50, fam2Name);
+ HTable t1 = (HTable) conn.getTable(table1);
+ int rows0 = MobSnapshotTestingUtils.countMobRows(t1, fam2Name);
+
+ latch.countDown();
+ String backupId =
+ backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR);
+ assertTrue(checkSucceeded(backupId));
+
+ LOG.info("backup complete " + backupId);
+ Assert.assertEquals(TEST_UTIL.countRows(t1, famName), NB_ROWS_IN_BATCH);
+
+ t.join();
+ Assert.assertEquals(TEST_UTIL.countRows(t1, fam3Name), NB_ROWS_IN_FAM3);
+ t1.close();
+
+ TableName[] tablesRestoreFull = new TableName[] { table1 };
+
+ TableName[] tablesMapFull = new TableName[] { table1_restore };
+
+ BackupAdmin client = getBackupAdmin();
+ client.restore(BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false,
+ tablesRestoreFull, tablesMapFull, false));
+
+ // check tables for full restore
+ HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
+ assertTrue(hAdmin.tableExists(table1_restore));
+
+ // #5.2 - checking row count of tables for full restore
+ HTable hTable = (HTable) conn.getTable(table1_restore);
+ Assert.assertEquals(TEST_UTIL.countRows(hTable, famName), NB_ROWS_IN_BATCH);
+ int cnt3 = TEST_UTIL.countRows(hTable, fam3Name);
+ Assert.assertTrue(cnt3 >= 0 && cnt3 <= NB_ROWS_IN_FAM3);
+
+ int rows1 = MobSnapshotTestingUtils.countMobRows(hTable, fam2Name);
+ Assert.assertEquals(rows0, rows1);
+ hTable.close();
+
+ hAdmin.close();
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
new file mode 100644
index 0000000..0386c27
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestRemoteRestore extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestRemoteRestore.class);
+
+ @Override
+ public void setUp() throws Exception {
+ useSecondCluster = true;
+ super.setUp();
+ }
+
+ /**
+ * Verify that a remote restore on a single table is successful.
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreRemote() throws Exception {
+
+ LOG.info("test remote full backup on a single table");
+ String backupId =
+ backupTables(BackupType.FULL, toList(table1.getNameAsString()), BACKUP_REMOTE_ROOT_DIR);
+ LOG.info("backup complete");
+ TableName[] tableset = new TableName[] { table1 };
+ TableName[] tablemap = new TableName[] { table1_restore };
+ getBackupAdmin().restore(
+ BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false, tableset,
+ tablemap, false));
+ HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+ assertTrue(hba.tableExists(table1_restore));
+ TEST_UTIL.deleteTable(table1_restore);
+ hba.close();
+ }
+
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java
new file mode 100644
index 0000000..556521f
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestRepairAfterFailedDelete extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestRepairAfterFailedDelete.class);
+
+ @Test
+ public void testRepairBackupDelete() throws Exception {
+ LOG.info("test repair backup delete on a single table with data");
+ List<TableName> tableList = Lists.newArrayList(table1);
+ String backupId = fullTableBackup(tableList);
+ assertTrue(checkSucceeded(backupId));
+ LOG.info("backup complete");
+ String[] backupIds = new String[] { backupId };
+ BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection());
+ BackupInfo info = table.readBackupInfo(backupId);
+ Path path = new Path(info.getBackupRootDir(), backupId);
+ FileSystem fs = FileSystem.get(path.toUri(), conf1);
+ assertTrue(fs.exists(path));
+
+ // Snapshot backup system table before delete
+ String snapshotName = "snapshot-backup";
+ Connection conn = TEST_UTIL.getConnection();
+ Admin admin = conn.getAdmin();
+ admin.snapshot(snapshotName, BackupSystemTable.getTableName(conf1));
+
+ int deleted = getBackupAdmin().deleteBackups(backupIds);
+
+ assertTrue(!fs.exists(path));
+ assertTrue(fs.exists(new Path(info.getBackupRootDir())));
+ assertTrue(1 == deleted);
+
+ // Emulate delete failure
+ // Restore backup system table
+ admin.disableTable(BackupSystemTable.getTableName(conf1));
+ admin.restoreSnapshot(snapshotName);
+ admin.enableTable(BackupSystemTable.getTableName(conf1));
+ // Start backup session
+ table.startBackupExclusiveOperation();
+ // Start delete operation
+ table.startDeleteOperation(backupIds);
+
+ // Now run repair command to repair "failed" delete operation
+ String[] args = new String[] {"repair"};
+ // Run restore
+ int ret = ToolRunner.run(conf1, new BackupDriver(), args);
+ assertTrue(ret == 0);
+ // Verify that history length == 0
+ assertTrue(table.getBackupHistory().size() == 0);
+ table.close();
+ admin.close();
+ }
+
+
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
new file mode 100644
index 0000000..c61b018
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestRestoreBoundaryTests extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestRestoreBoundaryTests.class);
+
+ /**
+ * Verify that a single empty table is restored to a new table
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreSingleEmpty() throws Exception {
+ LOG.info("test full restore on a single table empty table");
+ String backupId = fullTableBackup(toList(table1.getNameAsString()));
+ LOG.info("backup complete");
+ TableName[] tableset = new TableName[] { table1 };
+ TableName[] tablemap = new TableName[] { table1_restore };
+ getBackupAdmin().restore(
+ BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap,
+ false));
+ HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+ assertTrue(hba.tableExists(table1_restore));
+ TEST_UTIL.deleteTable(table1_restore);
+ }
+
+ /**
+ * Verify that multiple tables are restored to new tables.
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreMultipleEmpty() throws Exception {
+ LOG.info("create full backup image on multiple tables");
+
+ List<TableName> tables = toList(table2.getNameAsString(), table3.getNameAsString());
+ String backupId = fullTableBackup(tables);
+ TableName[] restore_tableset = new TableName[] { table2, table3 };
+ TableName[] tablemap = new TableName[] { table2_restore, table3_restore };
+ getBackupAdmin().restore(
+ BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, restore_tableset,
+ tablemap, false));
+ HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+ assertTrue(hba.tableExists(table2_restore));
+ assertTrue(hba.tableExists(table3_restore));
+ TEST_UTIL.deleteTable(table2_restore);
+ TEST_UTIL.deleteTable(table3_restore);
+ hba.close();
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java
new file mode 100644
index 0000000..6443421
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestSystemTableSnapshot extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestSystemTableSnapshot.class);
+
+ /**
+ * Verify backup system table snapshot
+ * @throws Exception
+ */
+ // @Test
+ public void _testBackupRestoreSystemTable() throws Exception {
+
+ LOG.info("test snapshot system table");
+
+ TableName backupSystem = BackupSystemTable.getTableName(conf1);
+
+ HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+ String snapshotName = "sysTable";
+ hba.snapshot(snapshotName, backupSystem);
+
+ hba.disableTable(backupSystem);
+ hba.restoreSnapshot(snapshotName);
+ hba.enableTable(backupSystem);
+ hba.close();
+ }
+
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
new file mode 100644
index 0000000..5f72f45
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
@@ -0,0 +1,162 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.master;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.TestBackupBase;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Iterables;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestBackupLogCleaner extends TestBackupBase {
+ private static final Log LOG = LogFactory.getLog(TestBackupLogCleaner.class);
+
+ // Implements all test cases in one test, since full backup and
+ // incremental backup have dependencies
+ @Test
+ public void testBackupLogCleaner() throws Exception {
+
+ // #1 - create full backup for all tables
+ LOG.info("create full backup image for all tables");
+
+ List<TableName> tableSetFullList = Lists.newArrayList(table1, table2, table3, table4);
+
+ try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) {
+ // Verify that we have no backup sessions yet
+ assertFalse(systemTable.hasBackupSessions());
+
+ List<FileStatus> walFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
+ List<String> swalFiles = convert(walFiles);
+ BackupLogCleaner cleaner = new BackupLogCleaner();
+ cleaner.setConf(TEST_UTIL.getConfiguration());
+ cleaner.init(null);
+
+ Iterable<FileStatus> deletable = cleaner.getDeletableFiles(walFiles);
+ int size = Iterables.size(deletable);
+
+ // We can delete all files because no backup sessions have been recorded yet
+ assertTrue(size == walFiles.size());
+
+ systemTable.addWALFiles(swalFiles, "backup", "root");
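+ // Register the current WAL files in the system table (dummy backup id and root)
+ // so they count as recorded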
+ String backupIdFull = fullTableBackup(tableSetFullList);
+ assertTrue(checkSucceeded(backupIdFull));
+ // Check one more time
+ deletable = cleaner.getDeletableFiles(walFiles);
+ // We can delete WAL files because they were saved into the backup system table
+ size = Iterables.size(deletable);
+ assertTrue(size == walFiles.size());
+
+ List<FileStatus> newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
+ LOG.debug("WAL list after full backup");
+ convert(newWalFiles);
+
+ // The new list of WAL files is longer than the previous one because
+ // new WALs per RS were opened after the full backup
+ assertTrue(walFiles.size() < newWalFiles.size());
+ Connection conn = ConnectionFactory.createConnection(conf1);
+ // #2 - insert some data to table
+ HTable t1 = (HTable) conn.getTable(table1);
+ Put p1;
+ for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+ p1 = new Put(Bytes.toBytes("row-t1" + i));
+ p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+ t1.put(p1);
+ }
+
+ t1.close();
+
+ HTable t2 = (HTable) conn.getTable(table2);
+ Put p2;
+ for (int i = 0; i < 5; i++) {
+ p2 = new Put(Bytes.toBytes("row-t2" + i));
+ p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+ t2.put(p2);
+ }
+
+ t2.close();
+
+ // #3 - incremental backup for multiple tables
+
+ List<TableName> tableSetIncList = Lists.newArrayList(table1, table2, table3);
+ String backupIdIncMultiple = backupTables(BackupType.INCREMENTAL, tableSetIncList,
+ BACKUP_ROOT_DIR);
+ assertTrue(checkSucceeded(backupIdIncMultiple));
+ deletable = cleaner.getDeletableFiles(newWalFiles);
+
+ assertTrue(Iterables.size(deletable) == newWalFiles.size());
+
+ conn.close();
+ }
+ }
+
+ private List<String> convert(List<FileStatus> walFiles) {
+ List<String> result = new ArrayList<String>();
+ for (FileStatus fs : walFiles) {
+ LOG.debug("+++WAL: " + fs.getPath().toString());
+ result.add(fs.getPath().toString());
+ }
+ return result;
+ }
+
+ private List<FileStatus> getListOfWALFiles(Configuration c) throws IOException {
+ Path logRoot = new Path(FSUtils.getRootDir(c), HConstants.HREGION_LOGDIR_NAME);
+ FileSystem fs = FileSystem.get(c);
+ RemoteIterator<LocatedFileStatus> it = fs.listFiles(logRoot, true);
+ List<FileStatus> logFiles = new ArrayList<FileStatus>();
+ while (it.hasNext()) {
+ LocatedFileStatus lfs = it.next();
+ if (lfs.isFile() && !AbstractFSWALProvider.isMetaFile(lfs.getPath())) {
+ logFiles.add(lfs);
+ LOG.info(lfs);
+ }
+ }
+ return logFiles;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-backup/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/resources/log4j.properties b/hbase-backup/src/test/resources/log4j.properties
new file mode 100644
index 0000000..c322699
--- /dev/null
+++ b/hbase-backup/src/test/resources/log4j.properties
@@ -0,0 +1,68 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hbase.root.logger=INFO,console
+hbase.log.dir=.
+hbase.log.file=hbase.log
+
+# Define the root logger to the system property "hbase.root.logger".
+log4j.rootLogger=${hbase.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+# Debugging Pattern format
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n
+
+# Custom Logging levels
+
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+
+log4j.logger.org.apache.hadoop=WARN
+log4j.logger.org.apache.zookeeper=ERROR
+log4j.logger.org.apache.hadoop.hbase=DEBUG
+
+#These settings are workarounds against spurious logs from the minicluster.
+#See HBASE-4709
+log4j.logger.org.apache.hadoop.metrics2.impl.MetricsConfig=WARN
+log4j.logger.org.apache.hadoop.metrics2.impl.MetricsSinkAdapter=WARN
+log4j.logger.org.apache.hadoop.metrics2.impl.MetricsSystemImpl=WARN
+log4j.logger.org.apache.hadoop.metrics2.util.MBeans=WARN
+# Enable this to get detailed connection error/retry logging.
+# log4j.logger.org.apache.hadoop.hbase.client.ConnectionImplementation=TRACE
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-it/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml
index 5d8946f..5709fac 100644
--- a/hbase-it/pom.xml
+++ b/hbase-it/pom.xml
@@ -232,6 +232,11 @@
<artifactId>hbase-testing-util</artifactId>
</dependency>
<dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-backup</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
<groupId>org.apache.hbase.thirdparty</groupId>
<artifactId>hbase-shaded-miscellaneous</artifactId>
</dependency>
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
deleted file mode 100644
index 9dc6382..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.util.BackupSet;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * The administrative API for HBase Backup. Construct an instance and call {@link #close()}
- * afterwards.
- * <p>
- * BackupAdmin can be used to create backups, restore data from backups and for other
- * backup-related operations.
- * @since 2.0
- */
-@InterfaceAudience.Private
-public interface BackupAdmin extends Closeable {
-
- /**
- * Backup given list of tables fully. This is a synchronous operation. It returns the backup id
- * on success or throws an exception on failure.
- * @param userRequest BackupRequest instance
- * @return the backup Id
- */
-
- String backupTables(final BackupRequest userRequest) throws IOException;
-
- /**
- * Restore backup
- * @param request restore request
- * @throws IOException exception
- */
- void restore(RestoreRequest request) throws IOException;
-
- /**
- * Describe backup image command
- * @param backupId backup id
- * @return backup info
- * @throws IOException exception
- */
- BackupInfo getBackupInfo(String backupId) throws IOException;
-
- /**
- * Delete backup image command
- * @param backupIds array of backup ids
- * @return total number of deleted sessions
- * @throws IOException exception
- */
- int deleteBackups(String[] backupIds) throws IOException;
-
- /**
- * Merge backup images command
- * @param backupIds array of backup ids of the images to be merged. The resulting backup
- * image will have the same backup id as the most recent image in the list.
- * @throws IOException exception
- */
- void mergeBackups(String[] backupIds) throws IOException;
-
- /**
- * Show backup history command
- * @param n last n backup sessions
- * @return list of backup info objects
- * @throws IOException exception
- */
- List<BackupInfo> getHistory(int n) throws IOException;
-
- /**
- * Show backup history command with filters
- * @param n last n backup sessions
- * @param f list of filters
- * @return list of backup info objects
- * @throws IOException exception
- */
- List<BackupInfo> getHistory(int n, BackupInfo.Filter... f) throws IOException;
-
- /**
- * Backup set list command - lists all backup sets. A backup set is a named group of tables.
- * @return all registered backup sets
- * @throws IOException exception
- */
- List<BackupSet> listBackupSets() throws IOException;
-
- /**
- * Backup set describe command. Shows the list of tables in the given backup set.
- * @param name set name
- * @return backup set description or null
- * @throws IOException exception
- */
- BackupSet getBackupSet(String name) throws IOException;
-
- /**
- * Delete backup set command
- * @param name backup set name
- * @return true if successful, false otherwise
- * @throws IOException exception
- */
- boolean deleteBackupSet(String name) throws IOException;
-
- /**
- * Add tables to backup set command
- * @param name name of backup set.
- * @param tables array of tables to be added to this set.
- * @throws IOException exception
- */
- void addToBackupSet(String name, TableName[] tables) throws IOException;
-
- /**
- * Remove tables from backup set
- * @param name name of backup set.
- * @param tables array of tables to be removed from this set.
- * @throws IOException exception
- */
- void removeFromBackupSet(String name, TableName[] tables) throws IOException;
-}
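For readers tracking the move: the BackupAdmin API itself is unchanged, only relocated to
hbase-backup. A minimal usage sketch against the relocated classes, assuming the
BackupRequest.Builder that ships with the backup feature; the class name BackupAdminExample,
the table name, and the target directory are illustrative placeholders:

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupAdmin;
import org.apache.hadoop.hbase.backup.BackupRequest;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BackupAdminExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        BackupAdmin admin = new BackupAdminImpl(conn)) {
      // Build a full-backup request; table and target dir are placeholders.
      BackupRequest request = new BackupRequest.Builder()
          .withBackupType(BackupType.FULL)
          .withTableList(Arrays.asList(TableName.valueOf("test-table")))
          .withTargetRootDir("hdfs://namenode:8020/backup")
          .build();
      // Synchronous: returns the backup id or throws on failure.
      String backupId = admin.backupTables(request);
      System.out.println("Created backup " + backupId);
    }
  }
}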
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java
deleted file mode 100644
index 21d73cc..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClientFactory.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient;
-import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient;
-import org.apache.hadoop.hbase.backup.impl.TableBackupClient;
-import org.apache.hadoop.hbase.client.Connection;
-
-public class BackupClientFactory {
-
- public static TableBackupClient create(Connection conn, String backupId, BackupRequest request)
- throws IOException {
- Configuration conf = conn.getConfiguration();
- try {
- String clsName = conf.get(TableBackupClient.BACKUP_CLIENT_IMPL_CLASS);
- if (clsName != null) {
- Class<?> clientImpl = Class.forName(clsName);
- TableBackupClient client = (TableBackupClient) clientImpl.newInstance();
- client.init(conn, backupId, request);
- return client;
- }
- } catch (Exception e) {
- throw new IOException(e);
- }
-
- BackupType type = request.getBackupType();
- if (type == BackupType.FULL) {
- return new FullTableBackupClient(conn, backupId, request);
- } else {
- return new IncrementalTableBackupClient(conn, backupId, request);
- }
- }
-}
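The factory's reflection path means any replacement client must expose a no-arg constructor,
since create() calls newInstance() before init(conn, backupId, request). A hypothetical sketch
of plugging in a custom client via TableBackupClient.BACKUP_CLIENT_IMPL_CLASS;
AuditingBackupClient and its log lines are invented for illustration, and it assumes
FullTableBackupClient's execute() is overridable:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient;
import org.apache.hadoop.hbase.backup.impl.TableBackupClient;

// Hypothetical client that wraps a full backup with extra logging.
public class AuditingBackupClient extends FullTableBackupClient {

  // The factory instantiates via Class.newInstance() and then calls
  // init(conn, backupId, request), so a no-arg constructor is required.
  public AuditingBackupClient() {
  }

  @Override
  public void execute() throws IOException {
    System.out.println("Starting audited full backup");
    super.execute();
    System.out.println("Finished audited full backup");
  }

  // Registration, so BackupClientFactory.create() picks this class up.
  public static void register(Configuration conf) {
    conf.set(TableBackupClient.BACKUP_CLIENT_IMPL_CLASS,
        AuditingBackupClient.class.getName());
  }
}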
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyJob.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyJob.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyJob.java
deleted file mode 100644
index 007e4c1..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyJob.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.backup.impl.BackupManager;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * A backup copy job is part of the backup process. The concrete implementation is
- * responsible for copying data from a cluster to the backup destination and is provided
- * by the backup provider; see {@link BackupRestoreFactory}.
- */
-@InterfaceAudience.Private
-public interface BackupCopyJob extends Configurable {
-
- /**
- * Copy backup data to destination
- * @param backupInfo context object
- * @param backupManager backup manager
- * @param conf configuration
- * @param backupType backup type (FULL or INCREMENTAL)
- * @param options array of options (implementation-specific)
- * @return result (0 on success, -1 on failure)
- * @throws IOException exception
- */
- int copy(BackupInfo backupInfo, BackupManager backupManager, Configuration conf,
- BackupType backupType, String[] options) throws IOException;
-
- /**
- * Cancel copy job
- * @param jobHandler backup copy job handler
- * @throws IOException exception
- */
- void cancel(String jobHandler) throws IOException;
-}
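A minimal sketch of the contract this interface defines, useful when reviewing the move:
implementations report 0 for success and -1 for failure and are obtained through
BackupRestoreFactory. LoggingCopyJob is a hypothetical name, not part of this patch:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.backup.BackupCopyJob;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.impl.BackupManager;

// Hypothetical copy job that only logs what it would copy.
public class LoggingCopyJob implements BackupCopyJob {
  private Configuration conf;

  @Override
  public int copy(BackupInfo backupInfo, BackupManager backupManager, Configuration conf,
      BackupType backupType, String[] options) throws IOException {
    System.out.println("Would copy " + backupType + " backup " + backupInfo.getBackupId());
    return 0; // 0 signals success to the caller, -1 failure
  }

  @Override
  public void cancel(String jobHandler) throws IOException {
    System.out.println("Cancel requested for copy job " + jobHandler);
  }

  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
  }

  @Override
  public Configuration getConf() {
    return conf;
  }
}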
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
deleted file mode 100644
index 9dd8531..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup;
-
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH_DESC;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER_DESC;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_DESC;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_DESC;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
-
-import java.io.IOException;
-import java.net.URI;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand;
-import org.apache.hadoop.hbase.backup.impl.BackupCommands;
-import org.apache.hadoop.hbase.backup.impl.BackupManager;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.util.AbstractHBaseTool;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-
-/**
- * Command-line entry point for backup operations
- */
-@InterfaceAudience.Private
-public class BackupDriver extends AbstractHBaseTool {
-
- private static final Log LOG = LogFactory.getLog(BackupDriver.class);
- private CommandLine cmd;
-
- public BackupDriver() throws IOException {
- init();
- }
-
- protected void init() throws IOException {
- // disable irrelevant loggers to keep them from cluttering command output
- LogUtils.disableZkAndClientLoggers(LOG);
- }
-
- private int parseAndRun(String[] args) throws IOException {
-
- // Check if backup is enabled
- if (!BackupManager.isBackupEnabled(getConf())) {
- System.err.println(BackupRestoreConstants.ENABLE_BACKUP);
- return -1;
- }
-
- System.out.println(BackupRestoreConstants.VERIFY_BACKUP);
-
- String cmd = null;
- String[] remainArgs = null;
- if (args == null || args.length == 0) {
- printToolUsage();
- return -1;
- } else {
- cmd = args[0];
- remainArgs = new String[args.length - 1];
- if (args.length > 1) {
- System.arraycopy(args, 1, remainArgs, 0, args.length - 1);
- }
- }
-
- BackupCommand type = BackupCommand.HELP;
- if (BackupCommand.CREATE.name().equalsIgnoreCase(cmd)) {
- type = BackupCommand.CREATE;
- } else if (BackupCommand.HELP.name().equalsIgnoreCase(cmd)) {
- type = BackupCommand.HELP;
- } else if (BackupCommand.DELETE.name().equalsIgnoreCase(cmd)) {
- type = BackupCommand.DELETE;
- } else if (BackupCommand.DESCRIBE.name().equalsIgnoreCase(cmd)) {
- type = BackupCommand.DESCRIBE;
- } else if (BackupCommand.HISTORY.name().equalsIgnoreCase(cmd)) {
- type = BackupCommand.HISTORY;
- } else if (BackupCommand.PROGRESS.name().equalsIgnoreCase(cmd)) {
- type = BackupCommand.PROGRESS;
- } else if (BackupCommand.SET.name().equalsIgnoreCase(cmd)) {
- type = BackupCommand.SET;
- } else if (BackupCommand.REPAIR.name().equalsIgnoreCase(cmd)) {
- type = BackupCommand.REPAIR;
- } else if (BackupCommand.MERGE.name().equalsIgnoreCase(cmd)) {
- type = BackupCommand.MERGE;
- } else {
- System.out.println("Unsupported command for backup: " + cmd);
- printToolUsage();
- return -1;
- }
-
- // enable debug logging only when requested
- Logger backupClientLogger = Logger.getLogger("org.apache.hadoop.hbase.backup");
- if (this.cmd.hasOption(OPTION_DEBUG)) {
- backupClientLogger.setLevel(Level.DEBUG);
- } else {
- backupClientLogger.setLevel(Level.INFO);
- }
-
- BackupCommands.Command command = BackupCommands.createCommand(getConf(), type, this.cmd);
- if (type == BackupCommand.CREATE && conf != null) {
- ((BackupCommands.CreateCommand) command).setConf(conf);
- }
- try {
- command.execute();
- } catch (IOException e) {
- if (e.getMessage().equals(BackupCommands.INCORRECT_USAGE)) {
- return -1;
- }
- throw e;
- } finally {
- command.finish();
- }
- return 0;
- }
-
- @Override
- protected void addOptions() {
- // define supported options
- addOptNoArg(OPTION_DEBUG, OPTION_DEBUG_DESC);
- addOptWithArg(OPTION_TABLE, OPTION_TABLE_DESC);
- addOptWithArg(OPTION_BANDWIDTH, OPTION_BANDWIDTH_DESC);
- addOptWithArg(OPTION_WORKERS, OPTION_WORKERS_DESC);
- addOptWithArg(OPTION_RECORD_NUMBER, OPTION_RECORD_NUMBER_DESC);
- addOptWithArg(OPTION_SET, OPTION_SET_DESC);
- addOptWithArg(OPTION_PATH, OPTION_PATH_DESC);
- }
-
- @Override
- protected void processOptions(CommandLine cmd) {
- this.cmd = cmd;
- }
-
- @Override
- protected int doWork() throws Exception {
- return parseAndRun(cmd.getArgs());
- }
-
- public static void main(String[] args) throws Exception {
- Configuration conf = HBaseConfiguration.create();
- Path hbasedir = FSUtils.getRootDir(conf);
- URI defaultFs = hbasedir.getFileSystem(conf).getUri();
- FSUtils.setFsDefault(conf, new Path(defaultFs));
- int ret = ToolRunner.run(conf, new BackupDriver(), args);
- System.exit(ret);
- }
-
- @Override
- public int run(String[] args) throws IOException {
- if (conf == null) {
- LOG.error("Tool configuration is not initialized");
- throw new NullPointerException("conf");
- }
-
- CommandLine cmd;
- try {
- // parse the command line arguments
- cmd = parseArgs(args);
- cmdLineArgs = args;
- } catch (Exception e) {
- System.err.println("Error when parsing command-line arguments: " + e.getMessage());
- printToolUsage();
- return EXIT_FAILURE;
- }
- processOptions(cmd);
-
- int ret = EXIT_FAILURE;
- try {
- ret = doWork();
- } catch (Exception e) {
- LOG.error("Error running command-line tool", e);
- return EXIT_FAILURE;
- }
- return ret;
- }
-
- protected void printToolUsage() throws IOException {
- System.out.println(BackupCommands.USAGE);
- }
-}
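BackupDriver is normally launched through the hbase wrapper script; the same entry point can
be driven programmatically with ToolRunner, mirroring main() above. A sketch; the history
subcommand and -n option follow the option constants above, but the exact arguments here are
illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupDriver;
import org.apache.hadoop.util.ToolRunner;

public class BackupDriverExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Ask for the most recent backup sessions; equivalent to
    // "hbase backup history -n 10" on the command line (args illustrative).
    int exitCode = ToolRunner.run(conf, new BackupDriver(),
        new String[] { "history", "-n", "10" });
    System.exit(exitCode);
  }
}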
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java
deleted file mode 100644
index ed554ad..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hbase.shaded.com.google.common.base.Predicate;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Iterables;
-
-/**
- * Implementation of a file cleaner that checks if an hfile is still referenced by backup before
- * deleting it from hfile archive directory.
- */
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
-public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abortable {
- private static final Log LOG = LogFactory.getLog(BackupHFileCleaner.class);
- private boolean stopped = false;
- private boolean aborted;
- private Configuration conf;
- private Connection connection;
- private long prevReadFromBackupTbl = 0, // timestamp of most recent read from backup:system table
- secondPrevReadFromBackupTbl = 0; // timestamp of 2nd most recent read from backup:system table
- // used by unit tests to skip reading backup:system
- private boolean checkForFullyBackedUpTables = true;
- private List<TableName> fullyBackedUpTables = null;
-
- private Set<String> getFilenameFromBulkLoad(Map<byte[], List<Path>>[] maps) {
- Set<String> filenames = new HashSet<String>();
- for (Map<byte[], List<Path>> map : maps) {
- if (map == null) continue;
- for (List<Path> paths : map.values()) {
- for (Path p : paths) {
- filenames.add(p.getName());
- }
- }
- }
- return filenames;
- }
-
- private Set<String> loadHFileRefs(List<TableName> tableList) throws IOException {
- if (connection == null) {
- connection = ConnectionFactory.createConnection(conf);
- }
- try (BackupSystemTable tbl = new BackupSystemTable(connection)) {
- Map<byte[], List<Path>>[] res =
- tbl.readBulkLoadedFiles(null, tableList);
- secondPrevReadFromBackupTbl = prevReadFromBackupTbl;
- prevReadFromBackupTbl = EnvironmentEdgeManager.currentTime();
- return getFilenameFromBulkLoad(res);
- }
- }
-
- @VisibleForTesting
- void setCheckForFullyBackedUpTables(boolean b) {
- checkForFullyBackedUpTables = b;
- }
- @Override
- public Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files) {
- if (conf == null) {
- return files;
- }
- // obtain the set of table names which have been fully backed up,
- // so that we can filter the bulk-loaded files returned from the server
- if (checkForFullyBackedUpTables) {
- if (connection == null) return files;
- try (BackupSystemTable tbl = new BackupSystemTable(connection)) {
- fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL);
- } catch (IOException ioe) {
- LOG.error("Failed to get tables which have been fully backed up, skipping checking", ioe);
- return Collections.emptyList();
- }
- Collections.sort(fullyBackedUpTables);
- }
- final Set<String> hfileRefs;
- try {
- hfileRefs = loadHFileRefs(fullyBackedUpTables);
- } catch (IOException ioe) {
- LOG.error("Failed to read hfile references, skipping checking deletable files", ioe);
- return Collections.emptyList();
- }
- Iterable<FileStatus> deletables = Iterables.filter(files, new Predicate<FileStatus>() {
- @Override
- public boolean apply(FileStatus file) {
- // If the file is recent, be conservative and wait for one more scan of backup:system table
- if (file.getModificationTime() > secondPrevReadFromBackupTbl) {
- return false;
- }
- String hfile = file.getPath().getName();
- boolean foundHFileRef = hfileRefs.contains(hfile);
- return !foundHFileRef;
- }
- });
- return deletables;
- }
-
- @Override
- public boolean isFileDeletable(FileStatus fStat) {
- // work is done in getDeletableFiles()
- return true;
- }
-
- @Override
- public void setConf(Configuration config) {
- this.conf = config;
- this.connection = null;
- try {
- this.connection = ConnectionFactory.createConnection(conf);
- } catch (IOException ioe) {
- LOG.error("Couldn't establish connection", ioe);
- }
- }
-
- @Override
- public void stop(String why) {
- if (this.stopped) {
- return;
- }
- if (this.connection != null) {
- try {
- this.connection.close();
- } catch (IOException ioe) {
- LOG.debug("Got " + ioe + " when closing connection");
- }
- }
- this.stopped = true;
- }
-
- @Override
- public boolean isStopped() {
- return this.stopped;
- }
-
- @Override
- public void abort(String why, Throwable e) {
- LOG.warn("Aborting ReplicationHFileCleaner because " + why, e);
- this.aborted = true;
- stop(why);
- }
-
- @Override
- public boolean isAborted() {
- return this.aborted;
- }
-}
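The cleaner only takes effect once registered in the master's hfile-cleaner chain. A sketch of
that registration, assuming the standard hbase.master.hfilecleaner.plugins key; in a real
deployment this is set in hbase-site.xml rather than in code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CleanerRegistrationExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Append BackupHFileCleaner to any cleaners that are already configured,
    // so archived hfiles still referenced by backups are not deleted.
    String existing = conf.get("hbase.master.hfilecleaner.plugins", "");
    String cleaner = "org.apache.hadoop.hbase.backup.BackupHFileCleaner";
    conf.set("hbase.master.hfilecleaner.plugins",
        existing.isEmpty() ? cleaner : existing + "," + cleaner);
    System.out.println(conf.get("hbase.master.hfilecleaner.plugins"));
  }
}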