Posted to commits@hbase.apache.org by te...@apache.org on 2017/03/10 23:37:56 UTC

[10/10] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3aaea8e0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3aaea8e0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3aaea8e0

Branch: refs/heads/HBASE-14123
Commit: 3aaea8e041d8963cba4b2d62060bc7d5abf787e8
Parents: e0365df
Author: tedyu <yu...@gmail.com>
Authored: Fri Mar 10 15:37:25 2017 -0800
Committer: tedyu <yu...@gmail.com>
Committed: Fri Mar 10 15:37:25 2017 -0800

----------------------------------------------------------------------
 bin/hbase                                       |    6 +
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |    4 +-
 .../hbase/client/RpcRetryingCallerImpl.java     |    3 +-
 .../apache/hadoop/hbase/backup/BackupType.java  |   25 +
 .../hadoop/hbase/util/AbstractHBaseTool.java    |   18 +-
 .../hbase/IntegrationTestBackupRestore.java     |  311 +
 .../shaded/protobuf/generated/BackupProtos.java | 7013 ++++++++++++++++++
 .../shaded/protobuf/generated/MasterProtos.java |   18 +-
 .../src/main/protobuf/Backup.proto              |  117 +
 hbase-server/pom.xml                            |   10 +
 .../apache/hadoop/hbase/backup/BackupAdmin.java |  128 +
 .../hadoop/hbase/backup/BackupCopyJob.java      |   55 +
 .../hadoop/hbase/backup/BackupDriver.java       |  204 +
 .../apache/hadoop/hbase/backup/BackupInfo.java  |  545 ++
 .../hadoop/hbase/backup/BackupRequest.java      |  139 +
 .../hbase/backup/BackupRestoreConstants.java    |  115 +
 .../hbase/backup/BackupRestoreFactory.java      |   66 +
 .../hadoop/hbase/backup/BackupTableInfo.java    |   82 +
 .../hadoop/hbase/backup/HBackupFileSystem.java  |  141 +
 .../apache/hadoop/hbase/backup/LogUtils.java    |   50 +
 .../hadoop/hbase/backup/RestoreDriver.java      |  265 +
 .../apache/hadoop/hbase/backup/RestoreJob.java  |   46 +
 .../hadoop/hbase/backup/RestoreRequest.java     |  135 +
 .../hbase/backup/impl/BackupAdminImpl.java      |  524 ++
 .../hbase/backup/impl/BackupCommands.java       |  780 ++
 .../hbase/backup/impl/BackupException.java      |   84 +
 .../hadoop/hbase/backup/impl/BackupManager.java |  472 ++
 .../hbase/backup/impl/BackupManifest.java       |  666 ++
 .../hbase/backup/impl/BackupSystemTable.java    | 1376 ++++
 .../backup/impl/FullTableBackupClient.java      |  189 +
 .../backup/impl/IncrementalBackupManager.java   |  344 +
 .../impl/IncrementalTableBackupClient.java      |  216 +
 .../hbase/backup/impl/RestoreTablesClient.java  |  237 +
 .../hbase/backup/impl/TableBackupClient.java    |  387 +
 .../backup/mapreduce/HFileSplitterJob.java      |  181 +
 .../mapreduce/MapReduceBackupCopyJob.java       |  344 +
 .../backup/mapreduce/MapReduceRestoreJob.java   |  182 +
 .../hbase/backup/master/BackupLogCleaner.java   |  142 +
 .../master/LogRollMasterProcedureManager.java   |  155 +
 .../regionserver/LogRollBackupSubprocedure.java |  168 +
 .../LogRollBackupSubprocedurePool.java          |  139 +
 .../LogRollRegionServerProcedureManager.java    |  185 +
 .../hadoop/hbase/backup/util/BackupSet.java     |   58 +
 .../hadoop/hbase/backup/util/BackupUtils.java   |  702 ++
 .../hadoop/hbase/backup/util/RestoreTool.java   |  610 ++
 .../BaseCoordinatedStateManager.java            |   20 +-
 .../coordination/ZkCoordinatedStateManager.java |   23 +-
 .../hbase/mapreduce/HFileInputFormat.java       |  174 +
 .../hbase/mapreduce/LoadIncrementalHFiles.java  |   25 +-
 .../hadoop/hbase/mapreduce/WALInputFormat.java  |   42 +-
 .../hadoop/hbase/mapreduce/WALPlayer.java       |   83 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |  129 +-
 .../hbase/master/snapshot/SnapshotManager.java  |    4 +-
 .../procedure/ZKProcedureCoordinatorRpcs.java   |    9 +-
 .../flush/MasterFlushTableProcedureManager.java |    4 +-
 .../hbase/regionserver/HRegionServer.java       |   17 +-
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |    5 +
 .../hadoop/hbase/HBaseTestingUtility.java       |   41 +-
 .../hadoop/hbase/backup/TestBackupBase.java     |  293 +
 .../hbase/backup/TestBackupBoundaryTests.java   |   97 +
 .../hbase/backup/TestBackupCommandLineTool.java |  431 ++
 .../hadoop/hbase/backup/TestBackupDelete.java   |  102 +
 .../hbase/backup/TestBackupDeleteRestore.java   |   70 +
 .../hadoop/hbase/backup/TestBackupDescribe.java |  110 +
 .../hbase/backup/TestBackupMultipleDeletes.java |  159 +
 .../hbase/backup/TestBackupShowHistory.java     |  148 +
 .../hbase/backup/TestBackupStatusProgress.java  |   96 +
 .../hbase/backup/TestBackupSystemTable.java     |  511 ++
 .../hadoop/hbase/backup/TestFullBackup.java     |   59 +
 .../hadoop/hbase/backup/TestFullBackupSet.java  |  103 +
 .../backup/TestFullBackupSetRestoreSet.java     |  128 +
 .../hadoop/hbase/backup/TestFullRestore.java    |  345 +
 .../hbase/backup/TestIncrementalBackup.java     |  200 +
 .../TestIncrementalBackupDeleteTable.java       |  129 +
 .../hadoop/hbase/backup/TestRemoteBackup.java   |  129 +
 .../hadoop/hbase/backup/TestRemoteRestore.java  |   52 +
 .../hbase/backup/TestRestoreBoundaryTests.java  |   80 +
 .../hbase/backup/TestSystemTableSnapshot.java   |   56 +
 .../backup/master/TestBackupLogCleaner.java     |  162 +
 .../procedure/SimpleMasterProcedureManager.java |    2 +-
 .../hadoop/hbase/procedure/TestZKProcedure.java |    6 +-
 .../procedure/TestZKProcedureControllers.java   |   24 +-
 82 files changed, 21602 insertions(+), 103 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/3aaea8e0/bin/hbase
----------------------------------------------------------------------
diff --git a/bin/hbase b/bin/hbase
index 1653c5a..f1114af 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -103,6 +103,8 @@ if [ $# = 0 ]; then
   echo "  ltt             Run LoadTestTool"
   echo "  canary          Run the Canary tool"
   echo "  version         Print the version"
+  echo "  backup          Backup tables for recovery"
+  echo "  restore         Restore tables from existing backup image"
   echo "  CLASSNAME       Run the class named CLASSNAME"
   exit 1
 fi
@@ -315,6 +317,10 @@ elif [ "$COMMAND" = "hfile" ] ; then
   CLASS='org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter'
 elif [ "$COMMAND" = "zkcli" ] ; then
   CLASS="org.apache.hadoop.hbase.zookeeper.ZooKeeperMainServer"
+elif [ "$COMMAND" = "backup" ] ; then
+  CLASS='org.apache.hadoop.hbase.backup.BackupDriver'
+elif [ "$COMMAND" = "restore" ] ; then
+  CLASS='org.apache.hadoop.hbase.backup.RestoreDriver'
 elif [ "$COMMAND" = "upgrade" ] ; then
   echo "This command was used to upgrade to HBase 0.96, it was removed in HBase 2.0.0."
   echo "Please follow the documentation at http://hbase.apache.org/book.html#upgrading."

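The two new commands map to org.apache.hadoop.hbase.backup.BackupDriver and
org.apache.hadoop.hbase.backup.RestoreDriver, both added in this commit. As a
rough illustration only (the authoritative syntax is the usage text printed by
the drivers themselves; the paths and ids below are placeholders):

  $ hbase backup create full <backup_root_path>
  $ hbase restore <backup_root_path> <backup_id>
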
http://git-wip-us.apache.org/repos/asf/hbase/blob/3aaea8e0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 1f143b5..5275dae 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -4098,11 +4098,11 @@ public class HBaseAdmin implements Admin {
     if (result == 0) {
       Iterator<HColumnDescriptor> remoteHCDIter = peerHtd.getFamilies().iterator();
       Iterator<HColumnDescriptor> localHCDIter = localHtd.getFamilies().iterator();
-          
+
       while (remoteHCDIter.hasNext() && localHCDIter.hasNext()) {
         HColumnDescriptor remoteHCD = remoteHCDIter.next();
         HColumnDescriptor localHCD = localHCDIter.next();
-        
+
         String remoteHCDName = remoteHCD.getNameAsString();
         String localHCDName = localHCD.getNameAsString();
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3aaea8e0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java
index 3f65e6e..c59b020 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java
@@ -107,6 +107,7 @@ public class RpcRetryingCallerImpl<T> implements RpcRetryingCaller<T> {
       } catch (PreemptiveFastFailException e) {
         throw e;
       } catch (Throwable t) {
+        Throwable e = t.getCause();
         ExceptionUtil.rethrowIfInterrupt(t);
 
         // translateException throws exception when should not retry: i.e. when request is bad.
@@ -185,7 +186,7 @@ public class RpcRetryingCallerImpl<T> implements RpcRetryingCaller<T> {
       }
     }
   }
-  
+
   /**
    * Get the good or the remote exception if any, throws the DoNotRetryIOException.
    * @param t the throwable to analyze

http://git-wip-us.apache.org/repos/asf/hbase/blob/3aaea8e0/hbase-common/src/main/java/org/apache/hadoop/hbase/backup/BackupType.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/backup/BackupType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/backup/BackupType.java
new file mode 100644
index 0000000..79f4636
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/backup/BackupType.java
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+@InterfaceAudience.Private
+public enum BackupType {
+  FULL, INCREMENTAL
+}

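The enum is consumed by the new BackupRequest builder. A minimal sketch of
selecting the backup mode, mirroring the builder calls in
IntegrationTestBackupRestore later in this commit (tables and backupRootDir
are assumed to be defined by the caller):

  import java.util.List;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.backup.BackupRequest;
  import org.apache.hadoop.hbase.backup.BackupType;

  // FULL produces a complete image; INCREMENTAL captures changes since the
  // previous backup of the same tables.
  BackupRequest request = new BackupRequest.Builder()
      .withBackupType(BackupType.FULL)       // or BackupType.INCREMENTAL
      .withTableList(tables)                 // List<TableName> to back up
      .withTargetRootDir(backupRootDir)      // root directory for backup images
      .build();
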
http://git-wip-us.apache.org/repos/asf/hbase/blob/3aaea8e0/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
index a51a80f..29d10ae 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
@@ -22,7 +22,9 @@ import java.util.Comparator;
 import java.util.HashMap;
 import java.util.List;
 
+import org.apache.commons.cli.BasicParser;
 import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.DefaultParser;
 import org.apache.commons.cli.HelpFormatter;
 import org.apache.commons.cli.MissingOptionException;
@@ -32,9 +34,9 @@ import org.apache.commons.cli.ParseException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -47,12 +49,15 @@ public abstract class AbstractHBaseTool implements Tool, Configurable {
   protected static final int EXIT_SUCCESS = 0;
   protected static final int EXIT_FAILURE = 1;
 
+  public static final String SHORT_HELP_OPTION = "h";
+  public static final String LONG_HELP_OPTION = "help";
+
   private static final Option HELP_OPTION = new Option("h", "help", false,
       "Prints help for this tool.");
 
   private static final Log LOG = LogFactory.getLog(AbstractHBaseTool.class);
 
-  private final Options options = new Options();
+  protected final Options options = new Options();
 
   protected Configuration conf = null;
 
@@ -108,7 +113,7 @@ public abstract class AbstractHBaseTool implements Tool, Configurable {
   }
 
   @Override
-  public final int run(String[] args) throws IOException {
+  public int run(String[] args) throws IOException {
     cmdLineArgs = args;
     if (conf == null) {
       LOG.error("Tool configuration is not initialized");
@@ -161,6 +166,13 @@ public abstract class AbstractHBaseTool implements Tool, Configurable {
     return cl.getOptions().length != 0;
   }
 
+  protected CommandLine parseArgs(String[] args) throws ParseException {
+    options.addOption(SHORT_HELP_OPTION, LONG_HELP_OPTION, false, "Show usage");
+    addOptions();
+    CommandLineParser parser = new BasicParser();
+    return parser.parse(options, args);
+  }
+
   protected void printUsage() {
     printUsage("hbase " + getClass().getName() + " <options>", "Options:", "");
   }

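Relaxing run() and options, together with the new parseArgs() helper, lets
subclasses such as BackupDriver and RestoreDriver take over command-line
handling. Tools that keep the standard contract are unaffected; a minimal
sketch of such a subclass (the class name and option are hypothetical):

  import org.apache.commons.cli.CommandLine;
  import org.apache.hadoop.hbase.util.AbstractHBaseTool;

  public class ExampleTool extends AbstractHBaseTool {
    private int rows;

    @Override
    protected void addOptions() {
      // Options registered here are shown by printUsage() on bad input.
      addOptWithArg("rows", "Number of rows to process");
    }

    @Override
    protected void processOptions(CommandLine cmd) {
      rows = Integer.parseInt(cmd.getOptionValue("rows", "100"));
    }

    @Override
    protected int doWork() throws Exception {
      System.out.println("Processing " + rows + " rows");
      return EXIT_SUCCESS;
    }

    public static void main(String[] args) throws Exception {
      new ExampleTool().doStaticMain(args);
    }
  }
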
http://git-wip-us.apache.org/repos/asf/hbase/blob/3aaea8e0/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
new file mode 100644
index 0000000..8596489
--- /dev/null
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
@@ -0,0 +1,311 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.backup.BackupAdmin;
+import org.apache.hadoop.hbase.backup.BackupInfo;
+import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
+import org.apache.hadoop.hbase.backup.BackupRequest;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.RestoreRequest;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.backup.impl.BackupManager;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.base.Objects;
+import com.google.common.collect.Lists;
+
+/**
+ * An integration test to detect regressions in HBASE-7912. Creates
+ * tables with many regions, loads data, performs a series of backup/restore
+ * operations, then restores and verifies the data.
+ * @see <a href="https://issues.apache.org/jira/browse/HBASE-7912">HBASE-7912</a>
+ * @see <a href="https://issues.apache.org/jira/browse/HBASE-14123">HBASE-14123</a>
+ */
+@Category(IntegrationTests.class)
+public class IntegrationTestBackupRestore extends IntegrationTestBase {
+  private static final String CLASS_NAME = IntegrationTestBackupRestore.class.getSimpleName();
+  protected static final Log LOG = LogFactory.getLog(IntegrationTestBackupRestore.class);
+  protected static final TableName TABLE_NAME1 = TableName.valueOf(CLASS_NAME + ".table1");
+  protected static final TableName TABLE_NAME2 = TableName.valueOf(CLASS_NAME + ".table2");
+  protected static final String COLUMN_NAME = "f";
+  protected static final String REGION_COUNT_KEY = "regions_per_rs";
+  protected static final String REGIONSERVER_COUNT_KEY = "region_servers";
+  protected static final int DEFAULT_REGION_COUNT = 10;
+  protected static final int DEFAULT_REGIONSERVER_COUNT = 2;
+  protected static int regionsCountPerServer;
+  protected static int regionServerCount;
+  protected static final String NB_ROWS_IN_BATCH_KEY = "rows_in_batch";
+  protected static final int DEFAULT_NB_ROWS_IN_BATCH = 20000;
+  private static int rowsInBatch;
+  private static String BACKUP_ROOT_DIR = "backupIT";
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    util = new IntegrationTestingUtility();
+    Configuration conf = util.getConfiguration();
+    regionsCountPerServer = conf.getInt(REGION_COUNT_KEY, DEFAULT_REGION_COUNT);
+    regionServerCount =
+        conf.getInt(REGIONSERVER_COUNT_KEY, DEFAULT_REGIONSERVER_COUNT);
+    rowsInBatch = conf.getInt(NB_ROWS_IN_BATCH_KEY, DEFAULT_NB_ROWS_IN_BATCH);
+    enableBackup(conf);
+    LOG.info(String.format("Initializing cluster with %d region servers.", regionServerCount));
+    util.initializeCluster(regionServerCount);
+    LOG.info("Cluster initialized");
+    util.deleteTableIfAny(TABLE_NAME1);
+    util.deleteTableIfAny(TABLE_NAME2);
+    LOG.info("Cluster ready");
+  }
+
+  @After
+  public void tearDown() throws IOException {
+    LOG.info("Cleaning up after test.");
+    if(util.isDistributedCluster()) {
+      util.deleteTableIfAny(TABLE_NAME1);
+      LOG.info("Cleaning up after test. TABLE1 done");
+      util.deleteTableIfAny(TABLE_NAME2);
+      LOG.info("Cleaning up after test. TABLE2 done");
+      cleanUpBackupDir();
+    }
+    LOG.info("Restoring cluster.");
+    util.restoreCluster();
+    LOG.info("Cluster restored.");
+  }
+
+  private void enableBackup(Configuration conf) {
+    // Enable backup
+    conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
+    BackupManager.decorateMasterConfiguration(conf);
+    BackupManager.decorateRegionServerConfiguration(conf);
+  }
+
+  private void cleanUpBackupDir() throws IOException {
+    FileSystem fs = FileSystem.get(util.getConfiguration());
+    fs.delete(new Path(BACKUP_ROOT_DIR), true);
+  }
+
+  @Test
+  public void testBackupRestore() throws Exception {
+    BACKUP_ROOT_DIR = util.getDataTestDirOnTestFS() + Path.SEPARATOR + BACKUP_ROOT_DIR;
+    createTable(TABLE_NAME1);
+    createTable(TABLE_NAME2);
+    runTest();
+  }
+
+
+  private void createTable(TableName tableName) throws Exception {
+    long startTime, endTime;
+    HTableDescriptor desc = new HTableDescriptor(tableName);
+    HColumnDescriptor[] columns =
+        new HColumnDescriptor[]{new HColumnDescriptor(COLUMN_NAME)};
+    LOG.info(String.format("Creating table %s with %d splits.", tableName,
+      regionsCountPerServer));
+    startTime = System.currentTimeMillis();
+    HBaseTestingUtility.createPreSplitLoadTestTable(util.getConfiguration(), desc, columns,
+      regionsCountPerServer);
+    util.waitTableAvailable(tableName);
+    endTime = System.currentTimeMillis();
+    LOG.info(String.format("Pre-split table created successfully in %dms.",
+      (endTime - startTime)));
+  }
+
+  private void loadData(TableName table, int numRows) throws IOException {
+    Connection conn = util.getConnection();
+    // #0- insert some data to a table
+    Table t1 = conn.getTable(table);
+    util.loadRandomRows(t1, new byte[]{'f'}, 100, numRows);
+    // flush table
+    conn.getAdmin().flush(TableName.valueOf(table.getName()));
+  }
+
+  private void runTest() throws IOException {
+
+    try (Connection conn = util.getConnection();
+         Admin admin = conn.getAdmin();
+         BackupAdmin client = new BackupAdminImpl(conn);) {
+
+      // #0- insert some data to table TABLE_NAME1, TABLE_NAME2
+      loadData(TABLE_NAME1, rowsInBatch);
+      loadData(TABLE_NAME2, rowsInBatch);
+      // #1 - create full backup for all tables
+      LOG.info("create full backup image for all tables");
+      List<TableName> tables = Lists.newArrayList(TABLE_NAME1, TABLE_NAME2);
+
+      BackupRequest.Builder builder = new BackupRequest.Builder();
+      BackupRequest request =
+          builder.withBackupType(BackupType.FULL).withTableList(tables)
+              .withTargetRootDir(BACKUP_ROOT_DIR).build();
+      String backupIdFull = client.backupTables(request);
+      assertTrue(checkSucceeded(backupIdFull));
+      // #2 - insert some data to table
+      loadData(TABLE_NAME1, rowsInBatch);
+      loadData(TABLE_NAME2, rowsInBatch);
+      HTable t1 = (HTable) conn.getTable(TABLE_NAME1);
+      Assert.assertEquals(util.countRows(t1), rowsInBatch * 2);
+      t1.close();
+      HTable t2 = (HTable) conn.getTable(TABLE_NAME2);
+      Assert.assertEquals(util.countRows(t2), rowsInBatch * 2);
+      t2.close();
+      // #3 - incremental backup for tables
+      tables = Lists.newArrayList(TABLE_NAME1, TABLE_NAME2);
+      builder = new BackupRequest.Builder();
+      request =
+          builder.withBackupType(BackupType.INCREMENTAL).withTableList(tables)
+              .withTargetRootDir(BACKUP_ROOT_DIR).build();
+      String backupIdIncMultiple = client.backupTables(request);
+      assertTrue(checkSucceeded(backupIdIncMultiple));
+      // #4 - restore full backup for all tables, without overwrite
+      TableName[] tablesRestoreFull = new TableName[] { TABLE_NAME1, TABLE_NAME2 };
+      client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, tablesRestoreFull,
+        null, true));
+      // #5.1 - check tables for full restore
+      assertTrue(admin.tableExists(TABLE_NAME1));
+      assertTrue(admin.tableExists(TABLE_NAME2));
+      // #5.2 - checking row count of tables for full restore
+      HTable hTable = (HTable) conn.getTable(TABLE_NAME1);
+      Assert.assertEquals(util.countRows(hTable), rowsInBatch);
+      hTable.close();
+      hTable = (HTable) conn.getTable(TABLE_NAME2);
+      Assert.assertEquals(util.countRows(hTable), rowsInBatch);
+      hTable.close();
+      // #6 - restore incremental backup for multiple tables, with overwrite
+      TableName[] tablesRestoreIncMultiple = new TableName[] { TABLE_NAME1, TABLE_NAME2 };
+      client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false,
+        tablesRestoreIncMultiple, null, true));
+      hTable = (HTable) conn.getTable(TABLE_NAME1);
+      Assert.assertEquals(util.countRows(hTable), rowsInBatch * 2);
+      hTable.close();
+      hTable = (HTable) conn.getTable(TABLE_NAME2);
+      Assert.assertEquals(util.countRows(hTable), rowsInBatch * 2);
+      hTable.close();
+    }
+  }
+
+  protected boolean checkSucceeded(String backupId) throws IOException {
+    BackupInfo status = getBackupInfo(backupId);
+    if (status == null) return false;
+    return status.getState() == BackupState.COMPLETE;
+  }
+
+  private BackupInfo getBackupInfo(String backupId) throws IOException {
+    try (BackupSystemTable table = new BackupSystemTable(util.getConnection())) {
+      return table.readBackupInfo(backupId);
+    }
+  }
+
+  /**
+   * Get restore request.
+   */
+  public RestoreRequest createRestoreRequest(String backupRootDir, String backupId, boolean check,
+      TableName[] fromTables, TableName[] toTables, boolean isOverwrite) {
+    RestoreRequest.Builder builder = new RestoreRequest.Builder();
+    return builder.withBackupRootDir(backupRootDir)
+                                    .withBackupId(backupId)
+                                    .withCheck(check)
+                                    .withFromTables(fromTables)
+                                    .withToTables(toTables)
+                                    .withOvewrite(isOverwrite).build();
+  }
+
+  @Override
+  public void setUpCluster() throws Exception {
+    util = getTestingUtil(getConf());
+    enableBackup(getConf());
+    LOG.debug("Initializing/checking cluster has " + regionServerCount + " servers");
+    util.initializeCluster(regionServerCount);
+    LOG.debug("Done initializing/checking cluster");
+  }
+
+  @Override
+  public int runTestFromCommandLine() throws Exception {
+    testBackupRestore();
+    return 0;
+  }
+
+  @Override
+  public TableName getTablename() {
+    // That is only valid when Monkey is CALM (no monkey)
+    return null;
+  }
+
+  @Override
+  protected Set<String> getColumnFamilies() {
+    // That is only valid when Monkey is CALM (no monkey)
+    return null;
+  }
+
+  @Override
+  protected void addOptions() {
+    addOptWithArg(REGIONSERVER_COUNT_KEY, "Total number of region servers. Default: '"
+        + DEFAULT_REGIONSERVER_COUNT + "'");
+    addOptWithArg(REGION_COUNT_KEY, "Total number of regions. Default: " + DEFAULT_REGION_COUNT);
+    addOptWithArg(NB_ROWS_IN_BATCH_KEY, "Total number of data rows to be loaded"
+        + " (per table, per batch; number of batches = 2). Default: " + DEFAULT_NB_ROWS_IN_BATCH);
+
+  }
+
+  @Override
+  protected void processOptions(CommandLine cmd) {
+    super.processOptions(cmd);
+    regionsCountPerServer =
+        Integer.parseInt(cmd.getOptionValue(REGION_COUNT_KEY,
+          Integer.toString(DEFAULT_REGION_COUNT)));
+    regionServerCount =
+        Integer.parseInt(cmd.getOptionValue(REGIONSERVER_COUNT_KEY,
+          Integer.toString(DEFAULT_REGIONSERVER_COUNT)));
+    rowsInBatch =
+        Integer.parseInt(cmd.getOptionValue(NB_ROWS_IN_BATCH_KEY,
+          Integer.toString(DEFAULT_NB_ROWS_IN_BATCH)));
+    LOG.info(Objects.toStringHelper("Parsed Options").add(REGION_COUNT_KEY, regionsCountPerServer)
+        .add(REGIONSERVER_COUNT_KEY, regionServerCount).add(NB_ROWS_IN_BATCH_KEY, rowsInBatch)
+        .toString());
+  }
+
+  public static void main(String[] args) throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    IntegrationTestingUtility.setUseDistributedCluster(conf);
+    int status = ToolRunner.run(conf, new IntegrationTestBackupRestore(), args);
+    System.exit(status);
+  }
+}
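
On a distributed cluster the test is launched through the main() above;
processOptions() lists the accepted flags (-region_servers, -regions_per_rs,
-rows_in_batch), each falling back to the defaults declared at the top of the
class.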