Posted to commits@hbase.apache.org by te...@apache.org on 2016/11/14 17:22:57 UTC

[11/11] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2725fb25
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2725fb25
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2725fb25

Branch: refs/heads/14123
Commit: 2725fb25b1f21bd6533409ebd4768627d74f2130
Parents: 9250bf8
Author: tedyu <yu...@gmail.com>
Authored: Mon Nov 14 09:21:25 2016 -0800
Committer: tedyu <yu...@gmail.com>
Committed: Mon Nov 14 09:21:25 2016 -0800

----------------------------------------------------------------------
 bin/hbase                                       |    6 +
 .../hadoop/hbase/protobuf/ProtobufUtil.java     |   48 +-
 .../ClientSnapshotDescriptionUtils.java         |    8 +-
 .../java/org/apache/hadoop/hbase/TableName.java |    6 +-
 .../apache/hadoop/hbase/backup/BackupType.java  |   25 +
 .../hadoop/hbase/util/AbstractHBaseTool.java    |   33 +-
 .../hbase/coprocessor/TestClassLoading.java     |   53 +-
 .../hbase/IntegrationTestBackupRestore.java     |  298 +
 .../shaded/protobuf/generated/BackupProtos.java | 7592 ++++++++++++++++++
 .../src/main/protobuf/Backup.proto              |   96 +
 hbase-server/pom.xml                            |   10 +
 .../hbase/tmpl/master/MasterStatusTmpl.jamon    |    2 +
 .../apache/hadoop/hbase/backup/BackupAdmin.java |  159 +
 .../hadoop/hbase/backup/BackupCopyTask.java     |   53 +
 .../hadoop/hbase/backup/BackupDriver.java       |  205 +
 .../apache/hadoop/hbase/backup/BackupInfo.java  |  562 ++
 .../hadoop/hbase/backup/BackupRequest.java      |   90 +
 .../hbase/backup/BackupRestoreConstants.java    |   89 +
 .../backup/BackupRestoreServerFactory.java      |   65 +
 .../hadoop/hbase/backup/BackupStatus.java       |  104 +
 .../hadoop/hbase/backup/HBackupFileSystem.java  |  166 +
 .../hadoop/hbase/backup/RestoreDriver.java      |  248 +
 .../hadoop/hbase/backup/RestoreRequest.java     |   94 +
 .../apache/hadoop/hbase/backup/RestoreTask.java |   50 +
 .../hbase/backup/impl/BackupCommands.java       |  754 ++
 .../hbase/backup/impl/BackupException.java      |   86 +
 .../hadoop/hbase/backup/impl/BackupManager.java |  502 ++
 .../hbase/backup/impl/BackupManifest.java       |  711 ++
 .../hbase/backup/impl/BackupSnapshotCopy.java   |   36 +
 .../hbase/backup/impl/BackupSystemTable.java    |  937 +++
 .../backup/impl/BackupSystemTableHelper.java    |  437 +
 .../backup/impl/FullTableBackupClient.java      |  538 ++
 .../hbase/backup/impl/HBaseBackupAdmin.java     |  555 ++
 .../backup/impl/IncrementalBackupManager.java   |  356 +
 .../impl/IncrementalTableBackupClient.java      |  237 +
 .../hbase/backup/impl/RestoreTablesClient.java  |  236 +
 .../hbase/backup/mapreduce/HFileSplitter.java   |  191 +
 .../mapreduce/MapReduceBackupCopyTask.java      |  351 +
 .../backup/mapreduce/MapReduceRestoreTask.java  |  172 +
 .../hbase/backup/master/BackupController.java   |   63 +
 .../hbase/backup/master/BackupLogCleaner.java   |  144 +
 .../master/LogRollMasterProcedureManager.java   |  148 +
 .../regionserver/LogRollBackupSubprocedure.java |  167 +
 .../LogRollBackupSubprocedurePool.java          |  137 +
 .../LogRollRegionServerProcedureManager.java    |  186 +
 .../hbase/backup/util/BackupClientUtil.java     |  437 +
 .../hbase/backup/util/BackupServerUtil.java     |  487 ++
 .../hadoop/hbase/backup/util/BackupSet.java     |   63 +
 .../hadoop/hbase/backup/util/LogUtils.java      |   45 +
 .../hbase/backup/util/RestoreServerUtil.java    |  755 ++
 .../BaseCoordinatedStateManager.java            |   20 +-
 .../coordination/ZkCoordinatedStateManager.java |   23 +-
 .../hadoop/hbase/mapred/TableOutputFormat.java  |    6 +-
 .../hbase/mapreduce/HFileInputFormat2.java      |  175 +
 .../hbase/mapreduce/LoadIncrementalHFiles.java  |    6 +-
 .../hadoop/hbase/mapreduce/WALInputFormat.java  |   42 +-
 .../hadoop/hbase/mapreduce/WALPlayer.java       |   83 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |    7 +-
 .../procedure/ZKProcedureCoordinatorRpcs.java   |    3 +-
 .../hbase/regionserver/HRegionServer.java       |   16 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java   |   27 +-
 .../apache/hadoop/hbase/util/ProcedureUtil.java |  105 +
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |    5 +
 .../hadoop/hbase/HBaseTestingUtility.java       |   45 +-
 .../org/apache/hadoop/hbase/TestNamespace.java  |   12 +-
 .../hadoop/hbase/backup/TestBackupBase.java     |  299 +
 .../hbase/backup/TestBackupBoundaryTests.java   |   97 +
 .../hbase/backup/TestBackupCommandLineTool.java |  413 +
 .../hadoop/hbase/backup/TestBackupDelete.java   |  102 +
 .../hbase/backup/TestBackupDeleteRestore.java   |   70 +
 .../hadoop/hbase/backup/TestBackupDescribe.java |  111 +
 .../hbase/backup/TestBackupMultipleDeletes.java |  173 +
 .../hbase/backup/TestBackupShowHistory.java     |  146 +
 .../hbase/backup/TestBackupStatusProgress.java  |   98 +
 .../hbase/backup/TestBackupSystemTable.java     |  527 ++
 .../hadoop/hbase/backup/TestFullBackup.java     |   76 +
 .../hadoop/hbase/backup/TestFullBackupSet.java  |  105 +
 .../backup/TestFullBackupSetRestoreSet.java     |  129 +
 .../hadoop/hbase/backup/TestFullRestore.java    |  322 +
 .../hbase/backup/TestIncrementalBackup.java     |  208 +
 .../TestIncrementalBackupDeleteTable.java       |  140 +
 .../hadoop/hbase/backup/TestRemoteBackup.java   |  132 +
 .../hadoop/hbase/backup/TestRemoteRestore.java  |   51 +
 .../hbase/backup/TestRestoreBoundaryTests.java  |   78 +
 .../hbase/backup/TestSystemTableSnapshot.java   |   59 +
 .../backup/master/TestBackupLogCleaner.java     |  162 +
 .../hbase/client/TestMetaWithReplicas.java      |    8 +-
 ...TestMasterCoprocessorExceptionWithAbort.java |    3 +
 ...estMasterCoprocessorExceptionWithRemove.java |    3 +
 .../TestTableOutputFormatConnectionExhaust.java |   10 +-
 .../hbase/master/MockNoopMasterServices.java    |   13 +-
 .../hadoop/hbase/master/TestCatalogJanitor.java |   18 +-
 .../master/TestDistributedLogSplitting.java     |   12 +-
 .../hadoop/hbase/master/TestMasterFailover.java |   11 +-
 .../TestMasterOperationsForRegionReplicas.java  |   13 +-
 .../TestMasterRestartAfterDisablingTable.java   |   13 +-
 .../hadoop/hbase/master/TestRestartCluster.java |    7 +-
 .../hadoop/hbase/master/TestRollingRestart.java |    4 +-
 .../hbase/regionserver/TestRegionOpen.java      |    9 +-
 .../regionserver/TestRegionServerMetrics.java   |   46 +-
 .../replication/TestMasterReplication.java      |   22 +-
 .../security/access/TestTablePermissions.java   |   14 +-
 .../visibility/TestVisibilityLabelsWithACL.java |   16 +-
 .../hadoop/hbase/util/TestHBaseFsckOneRS.java   |   81 +-
 .../util/hbck/OfflineMetaRebuildTestCore.java   |    4 +-
 .../util/hbck/TestOfflineMetaRebuildBase.java   |    4 +-
 .../util/hbck/TestOfflineMetaRebuildHole.java   |    4 +-
 .../hbck/TestOfflineMetaRebuildOverlap.java     |    4 +-
 108 files changed, 22986 insertions(+), 199 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/bin/hbase
----------------------------------------------------------------------
diff --git a/bin/hbase b/bin/hbase
index 1653c5a..f1114af 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -103,6 +103,8 @@ if [ $# = 0 ]; then
   echo "  ltt             Run LoadTestTool"
   echo "  canary          Run the Canary tool"
   echo "  version         Print the version"
+  echo "  backup          Backup tables for recovery"
+  echo "  restore         Restore tables from existing backup image"
   echo "  CLASSNAME       Run the class named CLASSNAME"
   exit 1
 fi
@@ -315,6 +317,10 @@ elif [ "$COMMAND" = "hfile" ] ; then
   CLASS='org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter'
 elif [ "$COMMAND" = "zkcli" ] ; then
   CLASS="org.apache.hadoop.hbase.zookeeper.ZooKeeperMainServer"
+elif [ "$COMMAND" = "backup" ] ; then
+  CLASS='org.apache.hadoop.hbase.backup.BackupDriver'
+elif [ "$COMMAND" = "restore" ] ; then
+  CLASS='org.apache.hadoop.hbase.backup.RestoreDriver'
 elif [ "$COMMAND" = "upgrade" ] ; then
   echo "This command was used to upgrade to HBase 0.96, it was removed in HBase 2.0.0."
   echo "Please follow the documentation at http://hbase.apache.org/book.html#upgrading."

http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 330348d..2cc8fa7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Consistency;
@@ -78,6 +78,8 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -316,6 +318,28 @@ public final class ProtobufUtil {
     return ServerName.valueOf(hostName, port, startCode);
   }
 
+
+  /**
+   * Convert a protocol buffer ServerName to a ServerName
+   *
+   * @param proto the protocol buffer ServerName to convert
+   * @return the converted ServerName
+   */
+  public static ServerName toServerNameShaded(
+      final org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName proto) {
+    if (proto == null) return null;
+    String hostName = proto.getHostName();
+    long startCode = -1;
+    int port = -1;
+    if (proto.hasPort()) {
+      port = proto.getPort();
+    }
+    if (proto.hasStartCode()) {
+      startCode = proto.getStartCode();
+    }
+    return ServerName.valueOf(hostName, port, startCode);
+  }
+
   /**
    * Convert a protobuf Durability into a client Durability
    */
@@ -1664,6 +1688,20 @@ public final class ProtobufUtil {
         .setQualifier(ByteStringer.wrap(tableName.getQualifier())).build();
   }
 
+  public static TableName toTableName(
+      org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName tableNamePB) {
+    return TableName.valueOf(tableNamePB.getNamespace().asReadOnlyByteBuffer(),
+        tableNamePB.getQualifier().asReadOnlyByteBuffer());
+  }
+
+  public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName
+    toProtoTableNameShaded(TableName tableName) {
+    return org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.newBuilder()
+        .setNamespace(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFrom(tableName.getNamespace()))
+        .setQualifier(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFrom(tableName.getQualifier())).build();
+  }
+
+
   /**
    * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding
    * buffers when working with byte arrays
@@ -1741,6 +1779,10 @@ public final class ProtobufUtil {
     return regionBuilder.build();
   }
 
+  public static BackupProtos.BackupType toProtoBackupType(BackupType type) {
+    return BackupProtos.BackupType.valueOf(type.name());
+  }
+
   /**
    * Get a ServerName from the passed in data bytes.
    * @param data Data with a serialize server name in it; can handle the old style
@@ -1749,7 +1791,7 @@ public final class ProtobufUtil {
    * has a serialized {@link ServerName} in it.
    * @return Returns null if <code>data</code> is null else converts passed data
    * to a ServerName instance.
-   * @throws DeserializationException 
+   * @throws DeserializationException
    */
   public static ServerName toServerName(final byte [] data) throws DeserializationException {
     if (data == null || data.length <= 0) return null;
@@ -1784,4 +1826,4 @@ public final class ProtobufUtil {
     int port = Addressing.parsePort(str);
     return ServerName.valueOf(hostname, port, -1L);
   }
-}
\ No newline at end of file
+}
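
Taken together, the additions above let client code cross between the unshaded
types and the new shaded BackupProtos/HBaseProtos generated classes. A minimal
round-trip sketch using only the methods from this diff (the wrapper class and
main method are illustrative scaffolding):

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.backup.BackupType;
  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
  import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
  import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

  public class ShadedConversionSketch {
    public static void main(String[] args) {
      // TableName -> shaded protobuf TableName -> TableName round trip.
      TableName backupTable = TableName.valueOf("hbase", "backup");
      HBaseProtos.TableName pb = ProtobufUtil.toProtoTableNameShaded(backupTable);
      TableName roundTripped = ProtobufUtil.toTableName(pb);
      assert backupTable.equals(roundTripped);

      // BackupType enum names map 1:1 onto the shaded proto enum.
      BackupProtos.BackupType full = ProtobufUtil.toProtoBackupType(BackupType.FULL);
      System.out.println(full); // FULL
    }
  }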

http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java
index 88b6bec..7f19cbd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java
@@ -44,7 +44,8 @@ public class ClientSnapshotDescriptionUtils {
       // make sure the table name is valid, this will implicitly check validity
       TableName tableName = TableName.valueOf(snapshot.getTable());
 
-      if (tableName.isSystemTable()) {
+      if (tableName.isSystemTable() && !tableName.toString().equals("hbase:backup")) {
+        // allow hbase:backup table snapshots, but disallow other system tables
         throw new IllegalArgumentException("System table snapshots are not allowed");
       }
     }
@@ -52,9 +53,10 @@ public class ClientSnapshotDescriptionUtils {
 
   /**
    * Returns a single line (no \n) representation of snapshot metadata.  Use this instead of
-   * {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription#toString()}.  We don't replace SnapshotDescrpition's toString
+   * {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription#toString()}.
+   * We don't replace SnapshotDescription's toString
    * because it is auto-generated by protoc.
-   * @param ssd
+   * @param ssd snapshot description
    * @return Single line string with a summary of the snapshot parameters
    */
   public static String toString(HBaseProtos.SnapshotDescription ssd) {
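
The practical effect of the relaxed guard, seen through the public Admin API (a
sketch; connection setup is illustrative, and BACKUP_TABLE_NAME is added in the
TableName hunk below, see also TestSystemTableSnapshot in this patch):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class SystemTableSnapshotSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        // Now permitted: the backup system table itself can be snapshotted.
        admin.snapshot("backup_table_snap", TableName.BACKUP_TABLE_NAME);
        // Still rejected for any other system table:
        //   admin.snapshot("meta_snap", TableName.META_TABLE_NAME);
        //   -> IllegalArgumentException: System table snapshots are not allowed
      }
    }
  }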

http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
index 63066b3..a6e251d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
@@ -23,10 +23,10 @@ import java.util.Arrays;
 import java.util.Set;
 import java.util.concurrent.CopyOnWriteArraySet;
 
+import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.KeyValue.KVComparator;
 
 /**
  * Immutable POJO class for representing a table name.
@@ -86,6 +86,10 @@ public final class TableName implements Comparable<TableName> {
   public static final TableName NAMESPACE_TABLE_NAME =
       valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "namespace");
 
+  /** The backup table's name. */
+  public static final TableName BACKUP_TABLE_NAME =
+      valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "backup");
+
   public static final String OLD_META_STR = ".META.";
   public static final String OLD_ROOT_STR = "-ROOT-";
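
For reference, the new constant resolves into the system namespace (a small
sketch):

  import org.apache.hadoop.hbase.TableName;

  public class BackupTableNameSketch {
    public static void main(String[] args) {
      System.out.println(TableName.BACKUP_TABLE_NAME);                 // hbase:backup
      System.out.println(TableName.BACKUP_TABLE_NAME.isSystemTable()); // true
    }
  }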
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-common/src/main/java/org/apache/hadoop/hbase/backup/BackupType.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/backup/BackupType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/backup/BackupType.java
new file mode 100644
index 0000000..79f4636
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/backup/BackupType.java
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+@InterfaceAudience.Private
+public enum BackupType {
+  FULL, INCREMENTAL
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
index b5beaae..90988f0 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
@@ -21,8 +21,12 @@ import java.util.ArrayList;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
 
+import org.apache.commons.cli.BasicParser;
 import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.DefaultParser;
 import org.apache.commons.cli.HelpFormatter;
 import org.apache.commons.cli.MissingOptionException;
@@ -32,9 +36,9 @@ import org.apache.commons.cli.ParseException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -47,15 +51,20 @@ public abstract class AbstractHBaseTool implements Tool, Configurable {
   protected static final int EXIT_SUCCESS = 0;
   protected static final int EXIT_FAILURE = 1;
 
+  public static final String SHORT_HELP_OPTION = "h";
+  public static final String LONG_HELP_OPTION = "help";
+
   private static final Option HELP_OPTION = new Option("h", "help", false,
       "Prints help for this tool.");
 
   private static final Log LOG = LogFactory.getLog(AbstractHBaseTool.class);
 
-  private final Options options = new Options();
+  protected final Options options = new Options();
 
   protected Configuration conf = null;
 
+  protected static final Set<String> requiredOptions = new TreeSet<String>();
+
   protected String[] cmdLineArgs = null;
 
   // To print options in order they were added in help text.
@@ -107,8 +116,19 @@ public abstract class AbstractHBaseTool implements Tool, Configurable {
     this.conf = conf;
   }
 
+  protected boolean sanityCheckOptions(CommandLine cmd) {
+    boolean success = true;
+    for (String reqOpt : requiredOptions) {
+      if (!cmd.hasOption(reqOpt)) {
+        LOG.error("Required option -" + reqOpt + " is missing");
+        success = false;
+      }
+    }
+    return success;
+  }
+
   @Override
-  public final int run(String[] args) throws IOException {
+  public int run(String[] args) throws IOException {
     cmdLineArgs = args;
     if (conf == null) {
       LOG.error("Tool configuration is not initialized");
@@ -161,6 +181,13 @@ public abstract class AbstractHBaseTool implements Tool, Configurable {
     return cl.getOptions().length != 0;
   }
 
+  protected CommandLine parseArgs(String[] args) throws ParseException {
+    options.addOption(SHORT_HELP_OPTION, LONG_HELP_OPTION, false, "Show usage");
+    addOptions();
+    CommandLineParser parser = new BasicParser();
+    return parser.parse(options, args);
+  }
+
   protected void printUsage() {
     printUsage("bin/hbase " + getClass().getName() + " <options>", "Options:", "");
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java
----------------------------------------------------------------------
diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java
index f5d2a20..5728cd9 100644
--- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java
+++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java
@@ -18,10 +18,37 @@
  */
 package org.apache.hadoop.hbase.coprocessor;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.RegionLoad;
+import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
+import org.apache.hadoop.hbase.backup.master.BackupController;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.TestServerCustomProtocol;
@@ -30,22 +57,11 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.ClassLoaderTestHelper;
 import org.apache.hadoop.hbase.util.CoprocessorClassLoader;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.ServerLoad;
-import org.apache.hadoop.hbase.RegionLoad;
-
-import java.io.*;
-import java.util.*;
-
-import org.junit.*;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-
 /**
  * Test coprocessors class loading.
  */
@@ -69,6 +85,7 @@ public class TestClassLoading {
   private static Class<?> regionCoprocessor2 = TestServerCustomProtocol.PingHandler.class;
   private static Class<?> regionServerCoprocessor = SampleRegionWALObserver.class;
   private static Class<?> masterCoprocessor = BaseMasterObserver.class;
+  private static Class<?> backupCoprocessor = BackupController.class;
 
   private static final String[] regionServerSystemCoprocessors =
       new String[]{
@@ -82,7 +99,7 @@ public class TestClassLoading {
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     Configuration conf = TEST_UTIL.getConfiguration();
-
+    conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
     // regionCoprocessor1 will be loaded on all regionservers, since it is
     // loaded for any tables (user or meta).
     conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
@@ -532,7 +549,7 @@ public class TestClassLoading {
     // to master: verify that the master is reporting the correct set of
     // loaded coprocessors.
     final String loadedMasterCoprocessorsVerify =
-        "[" + masterCoprocessor.getSimpleName() + "]";
+        "[" + backupCoprocessor.getSimpleName() + ", " + masterCoprocessor.getSimpleName() + "]";
     String loadedMasterCoprocessors =
         java.util.Arrays.toString(
             TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessors());
@@ -541,7 +558,7 @@ public class TestClassLoading {
 
   @Test
   public void testFindCoprocessors() {
-    // HBASE 12277: 
+    // HBASE 12277:
     CoprocessorHost masterCpHost =
                              TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessorHost();
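
Enabling the feature in any test or embedded setup follows the same pattern
this test uses: with the flag on, the master registers BackupController, which
is why the expected coprocessor list above now includes it (a sketch):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.backup.BackupRestoreConstants;

  public class EnableBackupSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Backup is opt-in here: the test flips this key before the cluster starts.
      conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
    }
  }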
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
new file mode 100644
index 0000000..416ac13
--- /dev/null
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
@@ -0,0 +1,298 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.backup.BackupAdmin;
+import org.apache.hadoop.hbase.backup.BackupInfo;
+import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
+import org.apache.hadoop.hbase.backup.BackupRequest;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.RestoreRequest;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.apache.hadoop.hbase.util.RegionSplitter;
+import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;
+import org.apache.hadoop.util.ToolRunner;
+import org.hamcrest.CoreMatchers;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.base.Objects;
+import com.google.common.collect.Lists;
+
+/**
+ * An integration test to detect regressions in HBASE-7912. Creates
+ * tables with many regions, loads data, performs a series of backup and load
+ * operations, then restores and verifies the data.
+ * @see <a href="https://issues.apache.org/jira/browse/HBASE-7912">HBASE-7912</a>
+ */
+@Category(IntegrationTests.class)
+public class IntegrationTestBackupRestore extends IntegrationTestBase {
+  private static final String CLASS_NAME = IntegrationTestBackupRestore.class.getSimpleName();
+  protected static final Log LOG = LogFactory.getLog(IntegrationTestBackupRestore.class);
+  protected static final TableName TABLE_NAME1 = TableName.valueOf(CLASS_NAME + ".table1");
+  protected static final TableName TABLE_NAME2 = TableName.valueOf(CLASS_NAME + ".table2");
+  protected static final String COLUMN_NAME = "f";
+  protected static final String REGION_COUNT_KEY = "regions_per_rs";
+  protected static final String REGIONSERVER_COUNT_KEY = "region_servers";
+  protected static final int DEFAULT_REGION_COUNT = 10;
+  protected static final int DEFAULT_REGIONSERVER_COUNT = 2;
+  protected static int regionsCountPerServer;
+  protected static int regionServerCount;
+  protected static final String NB_ROWS_IN_BATCH_KEY = "rows_in_batch";
+  protected static final int DEFAULT_NB_ROWS_IN_BATCH = 20000;
+  private static int rowsInBatch;
+  private static String BACKUP_ROOT_DIR = "backupIT";
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    util = new IntegrationTestingUtility();
+    regionsCountPerServer = util.getConfiguration().getInt(REGION_COUNT_KEY, DEFAULT_REGION_COUNT);
+    regionServerCount =
+        util.getConfiguration().getInt(REGIONSERVER_COUNT_KEY, DEFAULT_REGIONSERVER_COUNT);
+    rowsInBatch = util.getConfiguration().getInt(NB_ROWS_IN_BATCH_KEY, DEFAULT_NB_ROWS_IN_BATCH);
+    LOG.info(String.format("Initializing cluster with %d region servers.", regionServerCount));
+    util.initializeCluster(regionServerCount);
+    LOG.info("Cluster initialized");
+    util.deleteTableIfAny(TABLE_NAME1);
+    util.deleteTableIfAny(TABLE_NAME2);
+    util.waitTableAvailable(BackupSystemTable.getTableName());
+    LOG.info("Cluster ready");
+  }
+
+  @After
+  public void tearDown() throws IOException {
+    LOG.info("Cleaning up after test.");
+    if(util.isDistributedCluster()) {
+      util.deleteTableIfAny(TABLE_NAME1);
+      LOG.info("Cleaning up after test. TABLE1 done");
+      util.deleteTableIfAny(TABLE_NAME2);
+      LOG.info("Cleaning up after test. TABLE2 done");
+      cleanUpBackupDir();
+    }
+    LOG.info("Restoring cluster.");
+    util.restoreCluster();
+    LOG.info("Cluster restored.");
+  }
+
+  private void cleanUpBackupDir() throws IOException {
+    FileSystem fs = FileSystem.get(util.getConfiguration());
+    fs.delete(new Path(BACKUP_ROOT_DIR), true);
+  }
+
+  @Test
+  public void testBackupRestore() throws Exception {
+    BACKUP_ROOT_DIR = util.getDataTestDirOnTestFS() + Path.SEPARATOR + BACKUP_ROOT_DIR;
+    createTable(TABLE_NAME1);
+    createTable(TABLE_NAME2);
+    runTest();
+  }
+
+
+  private void createTable(TableName tableName) throws Exception {
+    long startTime, endTime;
+    HTableDescriptor desc = new HTableDescriptor(tableName);
+    HColumnDescriptor[] columns =
+        new HColumnDescriptor[]{new HColumnDescriptor(COLUMN_NAME)};
+    SplitAlgorithm algo = new RegionSplitter.UniformSplit();
+    LOG.info(String.format("Creating table %s with %d splits.", tableName,
+      regionsCountPerServer));
+    startTime = System.currentTimeMillis();
+    HBaseTestingUtility.createPreSplitLoadTestTable(util.getConfiguration(), desc, columns,
+      regionsCountPerServer);
+    util.waitTableAvailable(tableName);
+    endTime = System.currentTimeMillis();
+    LOG.info(String.format("Pre-split table created successfully in %dms.",
+      (endTime - startTime)));
+  }
+
+  private void loadData(TableName table, int numRows) throws IOException {
+    Connection conn = util.getConnection();
+    // #0- insert some data to a table
+    HTable t1 = (HTable) conn.getTable(table);
+    util.loadRandomRows(t1, new byte[]{'f'}, 100, numRows);
+    conn.getBufferedMutator(table).flush();
+  }
+
+  private void runTest() throws IOException {
+    Connection conn = util.getConnection();
+
+    // #0- insert some data to table TABLE_NAME1, TABLE_NAME2
+    loadData(TABLE_NAME1, rowsInBatch);
+    loadData(TABLE_NAME2, rowsInBatch);
+    // #1 - create full backup for all tables
+    LOG.info("create full backup image for all tables");
+    List<TableName> tables = Lists.newArrayList(TABLE_NAME1, TABLE_NAME2);
+    HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
+    BackupAdmin client = new HBaseBackupAdmin(util.getConnection());
+
+    BackupRequest request = new BackupRequest();
+    request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR);
+    String backupIdFull = client.backupTables(request);
+    assertTrue(checkSucceeded(backupIdFull));
+    // #2 - insert some data to table
+    loadData(TABLE_NAME1, rowsInBatch);
+    loadData(TABLE_NAME2, rowsInBatch);
+    HTable t1 = (HTable) conn.getTable(TABLE_NAME1);
+    Assert.assertThat(util.countRows(t1), CoreMatchers.equalTo(rowsInBatch * 2));
+    t1.close();
+    HTable t2 = (HTable) conn.getTable(TABLE_NAME2);
+    Assert.assertThat(util.countRows(t2), CoreMatchers.equalTo(rowsInBatch * 2));
+    t2.close();
+    // #3 - incremental backup for tables
+    tables = Lists.newArrayList(TABLE_NAME1, TABLE_NAME2);
+    request = new BackupRequest();
+    request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
+        .setTargetRootDir(BACKUP_ROOT_DIR);
+    String backupIdIncMultiple = client.backupTables(request);
+    assertTrue(checkSucceeded(backupIdIncMultiple));
+    // #4 - restore full backup for all tables, without overwrite
+    TableName[] tablesRestoreFull = new TableName[] { TABLE_NAME1, TABLE_NAME2 };
+    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, tablesRestoreFull,
+      null, true));
+    // #5.1 - check tables for full restore
+    Admin hAdmin = util.getConnection().getAdmin();
+    assertTrue(hAdmin.tableExists(TABLE_NAME1));
+    assertTrue(hAdmin.tableExists(TABLE_NAME2));
+    hAdmin.close();
+    // #5.2 - checking row count of tables for full restore
+    HTable hTable = (HTable) conn.getTable(TABLE_NAME1);
+    Assert.assertThat(util.countRows(hTable), CoreMatchers.equalTo(rowsInBatch));
+    hTable.close();
+    hTable = (HTable) conn.getTable(TABLE_NAME2);
+    Assert.assertThat(util.countRows(hTable), CoreMatchers.equalTo(rowsInBatch));
+    hTable.close();
+    // #6 - restore incremental backup for multiple tables, with overwrite
+    TableName[] tablesRestoreIncMultiple = new TableName[] { TABLE_NAME1, TABLE_NAME2 };
+    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false,
+      tablesRestoreIncMultiple, null, true));
+    hTable = (HTable) conn.getTable(TABLE_NAME1);
+    Assert.assertThat(util.countRows(hTable), CoreMatchers.equalTo(rowsInBatch * 2));
+    hTable.close();
+    hTable = (HTable) conn.getTable(TABLE_NAME2);
+    Assert.assertThat(util.countRows(hTable), CoreMatchers.equalTo(rowsInBatch * 2));
+    hTable.close();
+    admin.close();
+    conn.close();
+  }
+
+  protected boolean checkSucceeded(String backupId) throws IOException {
+    BackupInfo status = getBackupContext(backupId);
+    if (status == null) return false;
+    return status.getState() == BackupState.COMPLETE;
+  }
+
+  private BackupInfo getBackupContext(String backupId) throws IOException {
+    try (BackupSystemTable table = new BackupSystemTable(util.getConnection())) {
+      BackupInfo status = table.readBackupInfo(backupId);
+      return status;
+    }
+  }
+
+  /**
+   * Get restore request.
+   */
+  public RestoreRequest createRestoreRequest(String backupRootDir, String backupId, boolean check,
+      TableName[] fromTables, TableName[] toTables, boolean isOverwrite) {
+    RestoreRequest request = new RestoreRequest();
+    request.setBackupRootDir(backupRootDir).setBackupId(backupId).setCheck(check)
+        .setFromTables(fromTables).setToTables(toTables).setOverwrite(isOverwrite);
+    return request;
+  }
+
+  @Override
+  public void setUpCluster() throws Exception {
+    util = getTestingUtil(getConf());
+    LOG.debug("Initializing/checking cluster has " + regionServerCount + " servers");
+    util.initializeCluster(regionServerCount);
+    LOG.debug("Done initializing/checking cluster");
+  }
+
+  @Override
+  public int runTestFromCommandLine() throws Exception {
+    testBackupRestore();
+    return 0;
+  }
+
+  @Override
+  public TableName getTablename() {
+    return null;
+  }
+
+  @Override
+  protected Set<String> getColumnFamilies() {
+    return null;
+  }
+
+  @Override
+  protected void addOptions() {
+    addOptWithArg(REGIONSERVER_COUNT_KEY, "Total number of region servers. Default: '"
+        + DEFAULT_REGIONSERVER_COUNT + "'");
+    addOptWithArg(REGION_COUNT_KEY, "Total number of regions. Default: " + DEFAULT_REGION_COUNT);
+    addOptWithArg(NB_ROWS_IN_BATCH_KEY, "Total number of data rows to be loaded"
+        + " per table, per batch (two batches total). Default: " + DEFAULT_NB_ROWS_IN_BATCH);
+
+  }
+
+  @Override
+  protected void processOptions(CommandLine cmd) {
+    super.processOptions(cmd);
+    regionsCountPerServer =
+        Integer.parseInt(cmd.getOptionValue(REGION_COUNT_KEY,
+          Integer.toString(DEFAULT_REGION_COUNT)));
+    regionServerCount =
+        Integer.parseInt(cmd.getOptionValue(REGIONSERVER_COUNT_KEY,
+          Integer.toString(DEFAULT_REGIONSERVER_COUNT)));
+    rowsInBatch =
+        Integer.parseInt(cmd.getOptionValue(NB_ROWS_IN_BATCH_KEY,
+          Integer.toString(DEFAULT_NB_ROWS_IN_BATCH)));
+    LOG.info(Objects.toStringHelper("Parsed Options").add(REGION_COUNT_KEY, regionsCountPerServer)
+        .add(REGIONSERVER_COUNT_KEY, regionServerCount).add(NB_ROWS_IN_BATCH_KEY, rowsInBatch)
+        .toString());
+  }
+
+  public static void main(String[] args) throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    IntegrationTestingUtility.setUseDistributedCluster(conf);
+    int status = ToolRunner.run(conf, new IntegrationTestBackupRestore(), args);
+    System.exit(status);
+  }
+}
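
For reference, the client-side API exercised by the test condenses to the
following round trip (a sketch distilled from runTest() above; the connection,
table name, and target directory are illustrative):

  import java.util.List;

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.backup.BackupAdmin;
  import org.apache.hadoop.hbase.backup.BackupRequest;
  import org.apache.hadoop.hbase.backup.BackupType;
  import org.apache.hadoop.hbase.backup.RestoreRequest;
  import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin;
  import org.apache.hadoop.hbase.client.Connection;

  import com.google.common.collect.Lists;

  public class BackupRestoreRoundTrip {
    public static String fullBackupAndRestore(Connection conn) throws Exception {
      BackupAdmin client = new HBaseBackupAdmin(conn);
      List<TableName> tables = Lists.newArrayList(TableName.valueOf("t1"));

      // Full backup of t1 into the given root directory.
      BackupRequest backup = new BackupRequest();
      backup.setBackupType(BackupType.FULL)
            .setTableList(tables)
            .setTargetRootDir("hdfs:///user/hbase/backupIT");
      String backupId = client.backupTables(backup);

      // Restore the same tables from that image, overwriting live data.
      RestoreRequest restore = new RestoreRequest();
      restore.setBackupRootDir("hdfs:///user/hbase/backupIT")
             .setBackupId(backupId)
             .setCheck(false)
             .setFromTables(new TableName[] { TableName.valueOf("t1") })
             .setToTables(null)
             .setOverwrite(true);
      client.restore(restore);
      return backupId;
    }
  }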