Posted to commits@hbase.apache.org by te...@apache.org on 2016/11/14 17:22:55 UTC

[09/11] hbase git commit: HBASE-14123 HBase Backup/Restore Phase 2 (Vladimir Rodionov)

http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-protocol-shaded/src/main/protobuf/Backup.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Backup.proto b/hbase-protocol-shaded/src/main/protobuf/Backup.proto
new file mode 100644
index 0000000..b7196ca
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/protobuf/Backup.proto
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file contains the protocol buffer definitions for the backup manifest
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.shaded.protobuf.generated";
+option java_outer_classname = "BackupProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "HBase.proto";
+
+enum BackupType {
+  FULL = 0;
+  INCREMENTAL = 1;
+}
+
+message ServerTimestamp {
+  optional ServerName server = 1;
+  optional uint64 timestamp = 2;
+}
+
+message TableServerTimestamp {
+  optional TableName table = 1;
+  repeated ServerTimestamp server_timestamp = 2;
+}
+
+message BackupImage {
+  optional string backup_id = 1;
+  optional BackupType backup_type = 2;
+  optional string root_dir = 3;
+  repeated TableName table_list = 4;
+  optional uint64 start_ts = 5;
+  optional uint64 complete_ts = 6;
+  repeated BackupImage ancestors = 7;
+  repeated TableServerTimestamp tst_map = 8;
+}
+
+message TableBackupStatus {
+  optional TableName table = 1;
+  optional string target_dir = 2;
+  optional string snapshot = 3;
+}
+
+message BackupInfo {
+  optional string backup_id = 1;
+  optional BackupType type = 2;
+  optional string target_root_dir = 3;
+  optional BackupState state = 4;
+  optional BackupPhase phase = 5;
+  optional string failed_message = 6;
+  repeated TableBackupStatus table_backup_status = 7;
+  optional uint64 start_ts = 8;
+  optional uint64 end_ts = 9;
+  optional uint32 progress = 10;
+  optional string job_id = 11;
+  optional uint32 workers_number = 12;
+  optional uint64 bandwidth = 13;
+
+  enum BackupState {
+    WAITING = 0;
+    RUNNING = 1;
+    COMPLETE = 2;
+    FAILED = 3;
+    CANCELLED = 4;
+  }
+
+  enum BackupPhase {
+    REQUEST = 0;
+    SNAPSHOT = 1;
+    PREPARE_INCREMENTAL = 2;
+    SNAPSHOTCOPY = 3;
+    INCREMENTAL_COPY = 4;
+    STORE_MANIFEST = 5;
+  }
+}
+

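The messages above are compiled into the Java class named by java_outer_classname
(BackupProtos). As an illustrative sketch only (ids, paths and timestamps are
invented), an incremental image records its lineage by embedding its baseline
image in the repeated "ancestors" field:

  import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;

  public class BackupImageSketch {
    public static void main(String[] args) {
      BackupProtos.BackupImage full = BackupProtos.BackupImage.newBuilder()
          .setBackupId("backup_1396650096738")
          .setBackupType(BackupProtos.BackupType.FULL)
          .setRootDir("hdfs://backup.hbase.org:9000/user/biadmin/backup1")
          .setStartTs(1396650096738L)
          .setCompleteTs(1396650099999L)
          .build();
      // An incremental image chains back to its baseline through 'ancestors'.
      BackupProtos.BackupImage incremental = BackupProtos.BackupImage.newBuilder()
          .setBackupId("backup_1396700000000")
          .setBackupType(BackupProtos.BackupType.INCREMENTAL)
          .setRootDir("hdfs://backup.hbase.org:9000/user/biadmin/backup1")
          .addAncestors(full)
          .build();
      System.out.println(incremental.getAncestors(0).getBackupId());
    }
  }
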
http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-server/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index 0bdee40..e6aed8e 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -402,6 +402,11 @@
       <groupId>commons-collections</groupId>
       <artifactId>commons-collections</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-distcp</artifactId>
+      <version>${hadoop-two.version}</version>
+    </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-hadoop-compat</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index 36d5112..d6223ea 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -400,6 +400,8 @@ AssignmentManager assignmentManager = master.getAssignmentManager();
         } else if (tableName.equals(TableName.valueOf("hbase:replication"))) {
             description = "The hbase:replication table tracks cross cluster replication through " +
             "WAL file offsets.";
+        } else if (tableName.equals(TableName.BACKUP_TABLE_NAME)) {
+            description = "The hbase:backup table stores backup system information.";
         }
     </%java>
     <td><% description %></td>

http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
new file mode 100644
index 0000000..0b8de28
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
@@ -0,0 +1,157 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.Future;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.util.BackupSet;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+/**
+ * The administrative API for HBase backup. Obtain an instance of a concrete
+ * implementation and call {@link #close()} afterwards.
+ * <p>BackupAdmin can be used to create backups, restore data from backups,
+ * and perform other backup-related operations.
+ *
+ * @since 2.0
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface BackupAdmin extends Closeable {
+
+  /**
+   * Backs up the given list of tables fully. Synchronous operation.
+   *
+   * @param userRequest BackupRequest instance
+   * @return the backup id
+   */
+  public String backupTables(final BackupRequest userRequest) throws IOException;
+
+  /**
+   * Backs up the given list of tables fully. Asynchronous operation.
+   *
+   * @param userRequest BackupRequest instance
+   * @return the backup id future
+   */
+  public Future<String> backupTablesAsync(final BackupRequest userRequest) throws IOException;
+
+  /**
+   * Restore backup
+   * @param request - restore request
+   * @throws IOException exception
+   */
+  public void restore(RestoreRequest request) throws IOException;
+
+  /**
+   * Restore backup
+   * @param request - restore request
+   * @return Future which client can wait on
+   * @throws IOException exception
+   */
+  public Future<Void> restoreAsync(RestoreRequest request) throws IOException;
+
+  /**
+   * Describe backup image command
+   * @param backupId - backup id
+   * @return backup info
+   * @throws IOException exception
+   */
+  public BackupInfo getBackupInfo(String backupId) throws IOException;
+
+  /**
+   * Show backup progress command
+   * @param backupId - backup id (may be null)
+   * @return backup progress (0-100%), -1 if no active sessions
+   *  or session not found
+   * @throws IOException exception
+   */
+  public int getProgress(String backupId) throws IOException;
+
+  /**
+   * Delete backup image command
+   * @param backupIds - array of backup ids
+   * @return total number of deleted sessions
+   * @throws IOException exception
+   */
+  public int deleteBackups(String[] backupIds) throws IOException;
+
+  /**
+   * Show backup history command
+   * @param n - last n backup sessions
+   * @return list of backup infos
+   * @throws IOException exception
+   */
+  public List<BackupInfo> getHistory(int n) throws IOException;
+
+
+  /**
+   * Show backup history command with filters
+   * @param n - last n backup sessions
+   * @param f - list of filters
+   * @return list of backup infos
+   * @throws IOException exception
+   */
+  public List<BackupInfo> getHistory(int n, BackupInfo.Filter ... f) throws IOException;
+
+
+  /**
+   * Backup sets list command - list all backup sets. Backup set is
+   * a named group of tables.
+   * @return all registered backup sets
+   * @throws IOException exception
+   */
+  public List<BackupSet> listBackupSets() throws IOException;
+
+  /**
+   * Backup set describe command. Shows list of tables in
+   * this particular backup set.
+   * @param name set name
+   * @return backup set description or null
+   * @throws IOException exception
+   */
+  public BackupSet getBackupSet(String name) throws IOException;
+
+  /**
+   * Delete backup set command
+   * @param name - backup set name
+   * @return true if the set was deleted, false otherwise
+   * @throws IOException exception
+   */
+  public boolean deleteBackupSet(String name) throws IOException;
+
+  /**
+   * Add tables to backup set command
+   * @param name - name of backup set.
+   * @param tables - list of tables to be added to this set.
+   * @throws IOException exception
+   */
+  public void addToBackupSet(String name, TableName[] tables) throws IOException;
+
+  /**
+   * Remove tables from backup set
+   * @param name - name of backup set.
+   * @param tables - list of tables to be removed from this set.
+   * @throws IOException exception
+   */
+  public void removeFromBackupSet(String name, String[] tables) throws IOException;
+}

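A usage sketch for the interface above; since this file defines only the
interface, the BackupAdmin instance is assumed to come from a concrete
implementation elsewhere in the patch and is passed in by the caller:

  import java.io.IOException;
  import java.util.List;
  import org.apache.hadoop.hbase.backup.BackupAdmin;
  import org.apache.hadoop.hbase.backup.BackupInfo;

  public class BackupHistorySketch {
    // Prints the short description of recent failed backup sessions.
    static void printFailedBackups(BackupAdmin admin) throws IOException {
      List<BackupInfo> failed = admin.getHistory(10, new BackupInfo.Filter() {
        @Override
        public boolean apply(BackupInfo info) {
          return info.getState() == BackupInfo.BackupState.FAILED;
        }
      });
      for (BackupInfo info : failed) {
        System.out.println(info.getShortDescription());
      }
    }
  }
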
http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyTask.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyTask.java
new file mode 100644
index 0000000..26a7e44
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyTask.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.backup.impl.BackupManager;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface BackupCopyTask extends Configurable {
+
+  /**
+   * Copy backup data to destination
+   * @param backupContext context object
+   * @param backupManager backup manager
+   * @param conf configuration
+   * @param copyType backup type (FULL or INCREMENTAL)
+   * @param options array of options (implementation-specific)
+   * @return result (0 - success, -1 - failure)
+   * @throws IOException exception
+   */
+  int copy(BackupInfo backupContext, BackupManager backupManager, Configuration conf,
+      BackupType copyType, String[] options) throws IOException;
+
+
+  /**
+   * Cancel copy job
+   * @param jobHandler - copy job handler
+   * @throws IOException exception
+   */
+  void cancelCopyJob(String jobHandler) throws IOException;
+}

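Implementations of this interface are pluggable (see BackupRestoreServerFactory
below). A minimal no-op sketch, useful mainly as a test double; the class name
LoggingBackupCopyTask is hypothetical and not part of the patch:

  package org.apache.hadoop.hbase.backup;

  import java.io.IOException;
  import org.apache.commons.logging.Log;
  import org.apache.commons.logging.LogFactory;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.backup.impl.BackupManager;

  public class LoggingBackupCopyTask implements BackupCopyTask {
    private static final Log LOG = LogFactory.getLog(LoggingBackupCopyTask.class);
    private Configuration conf;

    @Override
    public void setConf(Configuration conf) { this.conf = conf; }

    @Override
    public Configuration getConf() { return conf; }

    @Override
    public int copy(BackupInfo backupContext, BackupManager backupManager,
        Configuration conf, BackupType copyType, String[] options) throws IOException {
      // Log the request and report success without copying anything.
      LOG.info("copy requested for " + backupContext.getBackupId() + ", type=" + copyType);
      return 0;
    }

    @Override
    public void cancelCopyJob(String jobHandler) throws IOException {
      LOG.info("nothing to cancel for job " + jobHandler);
    }
  }
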
http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
new file mode 100644
index 0000000..099e418
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
@@ -0,0 +1,205 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.backup.impl.BackupCommands;
+import org.apache.hadoop.hbase.backup.impl.BackupManager;
+import org.apache.hadoop.hbase.backup.util.LogUtils;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.AbstractHBaseTool;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupDriver extends AbstractHBaseTool implements BackupRestoreConstants {
+
+  private static final Log LOG = LogFactory.getLog(BackupDriver.class);
+  private CommandLine cmd;
+
+  public BackupDriver() throws IOException {
+    init();
+  }
+
+  protected void init() throws IOException {
+    // disable irrelevant loggers to avoid cluttering the command output
+    LogUtils.disableZkAndClientLoggers(LOG);
+  }
+
+  private int parseAndRun(String[] args) throws IOException {
+
+    // Check if backup is enabled
+    if (!BackupManager.isBackupEnabled(getConf())) {
+      System.err.println("Backup is not enabled. To enable backup, "+
+          "set 'hbase.backup.enable'=true and restart "+
+          "the cluster");
+      return -1;
+    }
+
+    String cmd = null;
+    String[] remainArgs = null;
+    if (args == null || args.length == 0) {
+      printToolUsage();
+      return -1;
+    } else {
+      cmd = args[0];
+      remainArgs = new String[args.length - 1];
+      if (args.length > 1) {
+        System.arraycopy(args, 1, remainArgs, 0, args.length - 1);
+      }
+    }
+
+    BackupCommand type = BackupCommand.HELP;
+    if (BackupCommand.CREATE.name().equalsIgnoreCase(cmd)) {
+      type = BackupCommand.CREATE;
+    } else if (BackupCommand.HELP.name().equalsIgnoreCase(cmd)) {
+      type = BackupCommand.HELP;
+    } else if (BackupCommand.DELETE.name().equalsIgnoreCase(cmd)) {
+      type = BackupCommand.DELETE;
+    } else if (BackupCommand.DESCRIBE.name().equalsIgnoreCase(cmd)) {
+      type = BackupCommand.DESCRIBE;
+    } else if (BackupCommand.HISTORY.name().equalsIgnoreCase(cmd)) {
+      type = BackupCommand.HISTORY;
+    } else if (BackupCommand.PROGRESS.name().equalsIgnoreCase(cmd)) {
+      type = BackupCommand.PROGRESS;
+    } else if (BackupCommand.SET.name().equalsIgnoreCase(cmd)) {
+      type = BackupCommand.SET;
+    } else {
+      System.out.println("Unsupported command for backup: " + cmd);
+      printToolUsage();
+      return -1;
+    }
+
+    // enable debug logging
+    Logger backupClientLogger = Logger.getLogger("org.apache.hadoop.hbase.backup");
+    if (this.cmd.hasOption(OPTION_DEBUG)) {
+      backupClientLogger.setLevel(Level.DEBUG);
+    } else {
+      backupClientLogger.setLevel(Level.INFO);
+    }
+
+    // TODO: get rid of Command altogether?
+    BackupCommands.Command command = BackupCommands.createCommand(getConf(), type, this.cmd);
+    if (type == BackupCommand.CREATE && conf != null) {
+      ((BackupCommands.CreateCommand) command).setConf(conf);
+    }
+    try {
+      command.execute();
+    } catch (IOException e) {
+      if (BackupCommands.INCORRECT_USAGE.equals(e.getMessage())) {
+        return -1;
+      }
+      throw e;
+    }
+    return 0;
+  }
+
+  @Override
+  protected void addOptions() {
+    // define supported options
+    addOptNoArg(OPTION_DEBUG, OPTION_DEBUG_DESC);
+    addOptWithArg(OPTION_TABLE, OPTION_TABLE_DESC);
+    addOptWithArg(OPTION_BANDWIDTH, OPTION_BANDWIDTH_DESC);
+    addOptWithArg(OPTION_WORKERS, OPTION_WORKERS_DESC);
+    addOptWithArg(OPTION_RECORD_NUMBER, OPTION_RECORD_NUMBER_DESC);
+    addOptWithArg(OPTION_SET, OPTION_SET_DESC);
+    addOptWithArg(OPTION_PATH, OPTION_PATH_DESC);
+  }
+
+  @Override
+  protected void processOptions(CommandLine cmd) {
+    this.cmd = cmd;
+  }
+
+  @Override
+  protected int doWork() throws Exception {
+    return parseAndRun(cmd.getArgs());
+  }
+
+  public static void main(String[] args) throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    Path hbasedir = FSUtils.getRootDir(conf);
+    URI defaultFs = hbasedir.getFileSystem(conf).getUri();
+    FSUtils.setFsDefault(conf, new Path(defaultFs));
+    int ret = ToolRunner.run(conf, new BackupDriver(), args);
+    System.exit(ret);
+  }
+
+  @Override
+  public int run(String[] args) throws IOException {
+    if (conf == null) {
+      LOG.error("Tool configuration is not initialized");
+      throw new NullPointerException("conf");
+    }
+
+    CommandLine cmd;
+    try {
+      // parse the command line arguments
+      cmd = parseArgs(args);
+      cmdLineArgs = args;
+    } catch (Exception e) {
+      System.out.println("Error when parsing command-line arguments: " + e.getMessage());
+      printToolUsage();
+      return EXIT_FAILURE;
+    }
+
+    if (!sanityCheckOptions(cmd)) {
+      printToolUsage();
+      return EXIT_FAILURE;
+    }
+
+    processOptions(cmd);
+
+    int ret = EXIT_FAILURE;
+    try {
+      ret = doWork();
+    } catch (Exception e) {
+      LOG.error("Error running command-line tool", e);
+      return EXIT_FAILURE;
+    }
+    return ret;
+  }
+
+  @Override
+  protected boolean sanityCheckOptions(CommandLine cmd) {
+    boolean success = true;
+    for (String reqOpt : requiredOptions) {
+      if (!cmd.hasOption(reqOpt)) {
+        System.out.println("Required option -" + reqOpt + " is missing");
+        success = false;
+      }
+    }
+    return success;
+  }
+
+  protected void printToolUsage() throws IOException {
+    System.out.println(BackupCommands.USAGE);
+  }
+}

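For reference, a hedged sketch of invoking the driver programmatically, mirroring
main(). The "history" command comes from the BackupCommand enum and -n from
OPTION_RECORD_NUMBER, but the exact argument handling lives in BackupCommands,
which is not shown in this excerpt, so the argument layout is an assumption:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.backup.BackupDriver;
  import org.apache.hadoop.util.ToolRunner;

  public class BackupDriverSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      conf.setBoolean("hbase.backup.enable", true); // backup is off by default
      // Show the last 20 backup sessions (assumed argument layout).
      int ret = ToolRunner.run(conf, new BackupDriver(),
          new String[] { "history", "-n", "20" });
      System.exit(ret);
    }
  }
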
http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
new file mode 100644
index 0000000..4ea0299
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
@@ -0,0 +1,562 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.util.BackupClientUtil;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.BackupInfo.Builder;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.TableBackupStatus;
+import org.apache.hadoop.hbase.util.Bytes;
+
+
+/**
+ * An object to encapsulate the information for each backup request
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupInfo implements Comparable<BackupInfo> {
+  private static final Log LOG = LogFactory.getLog(BackupInfo.class);
+
+  public static interface Filter {
+
+    /**
+     * Filter interface
+     * @param info backup info
+     * @return true if info passes filter, false otherwise
+     */
+    public boolean apply(BackupInfo info);
+  }
+
+  /**
+   * Backup status flag
+   */
+  public static enum BackupState {
+    WAITING, RUNNING, COMPLETE, FAILED, ANY;
+  }
+
+  /**
+   * Backup phase
+   */
+  public static enum BackupPhase {
+    SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST;
+  }
+
+  /**
+   *  Backup id
+   */
+  private String backupId;
+
+  /**
+   * Backup type, full or incremental
+   */
+  private BackupType type;
+
+  /**
+   *  Target root directory for storing the backup files
+   */
+  private String targetRootDir;
+
+  /**
+   *  Backup state
+   */
+  private BackupState state;
+
+  /**
+   * Backup phase
+   */
+  private BackupPhase phase;
+
+  /**
+   * Backup failure message
+   */
+  private String failedMsg;
+
+  /**
+   * Backup status map for all tables
+   */
+  private Map<TableName, BackupStatus> backupStatusMap;
+
+  /**
+   * Actual start timestamp of a backup process
+   */
+  private long startTs;
+
+  /**
+   * Actual end timestamp of the backup process
+   */
+  private long endTs;
+
+  /**
+   * Total bytes of incremental logs copied
+   */
+  private long totalBytesCopied;
+
+  /**
+   *  For incremental backup, the location of the backed-up hlogs
+   */
+  private String hlogTargetDir = null;
+
+  /**
+   * Incremental backup file list
+   */
+  private transient List<String> incrBackupFileList;
+
+  /**
+   * New region server log timestamps for table set after distributed log roll
+   * key - table name, value - map of RegionServer hostname -> last log rolled timestamp
+   */
+  private transient HashMap<TableName, HashMap<String, Long>> tableSetTimestampMap;
+
+  /**
+   * Backup progress in percent (0-100)
+   */
+  private int progress;
+
+  /**
+   *  Distributed job id
+   */
+  private String jobId;
+
+  /**
+   *  Number of parallel workers. -1 - system defined
+   */
+  private int workers = -1;
+
+  /**
+   * Bandwidth per worker in MB per sec. -1 - unlimited
+   */
+  private long bandwidth = -1;
+
+  public BackupInfo() {
+    backupStatusMap = new HashMap<TableName, BackupStatus>();
+  }
+
+  public BackupInfo(String backupId, BackupType type, TableName[] tables, String targetRootDir) {
+    this();
+    this.backupId = backupId;
+    this.type = type;
+    this.targetRootDir = targetRootDir;
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Create BackupInfo: " + tables.length + " " + tables[0]);
+    }
+    this.addTables(tables);
+
+    if (type == BackupType.INCREMENTAL) {
+      setHlogTargetDir(BackupClientUtil.getLogBackupDir(targetRootDir, backupId));
+    }
+
+    this.startTs = 0;
+    this.endTs = 0;
+  }
+
+  public String getJobId() {
+    return jobId;
+  }
+
+  public void setJobId(String jobId) {
+    this.jobId = jobId;
+  }
+
+  public int getWorkers() {
+    return workers;
+  }
+
+  public void setWorkers(int workers) {
+    this.workers = workers;
+  }
+
+  public long getBandwidth() {
+    return bandwidth;
+  }
+
+  public void setBandwidth(long bandwidth) {
+    this.bandwidth = bandwidth;
+  }
+
+  public void setBackupStatusMap(Map<TableName, BackupStatus> backupStatusMap) {
+    this.backupStatusMap = backupStatusMap;
+  }
+
+  public HashMap<TableName, HashMap<String, Long>> getTableSetTimestampMap() {
+    return tableSetTimestampMap;
+  }
+
+  public void
+      setTableSetTimestampMap(HashMap<TableName, HashMap<String, Long>> tableSetTimestampMap) {
+    this.tableSetTimestampMap = tableSetTimestampMap;
+  }
+
+  public String getHlogTargetDir() {
+    return hlogTargetDir;
+  }
+
+  public void setType(BackupType type) {
+    this.type = type;
+  }
+
+  public void setTargetRootDir(String targetRootDir) {
+    this.targetRootDir = targetRootDir;
+  }
+
+  public void setTotalBytesCopied(long totalBytesCopied) {
+    this.totalBytesCopied = totalBytesCopied;
+  }
+
+  /**
+   * Set progress (0-100%)
+   * @param p progress value
+   */
+
+  public void setProgress(int p) {
+    this.progress = p;
+  }
+
+  /**
+   * Get current progress
+   */
+  public int getProgress() {
+    return progress;
+  }
+
+  public String getBackupId() {
+    return backupId;
+  }
+
+  public void setBackupId(String backupId) {
+    this.backupId = backupId;
+  }
+
+  public BackupStatus getBackupStatus(TableName table) {
+    return this.backupStatusMap.get(table);
+  }
+
+  public String getFailedMsg() {
+    return failedMsg;
+  }
+
+  public void setFailedMsg(String failedMsg) {
+    this.failedMsg = failedMsg;
+  }
+
+  public long getStartTs() {
+    return startTs;
+  }
+
+  public void setStartTs(long startTs) {
+    this.startTs = startTs;
+  }
+
+  public long getEndTs() {
+    return endTs;
+  }
+
+  public void setEndTs(long endTs) {
+    this.endTs = endTs;
+  }
+
+  public long getTotalBytesCopied() {
+    return totalBytesCopied;
+  }
+
+  public BackupState getState() {
+    return state;
+  }
+
+  public void setState(BackupState flag) {
+    this.state = flag;
+  }
+
+  public BackupPhase getPhase() {
+    return phase;
+  }
+
+  public void setPhase(BackupPhase phase) {
+    this.phase = phase;
+  }
+
+  public BackupType getType() {
+    return type;
+  }
+
+  public void setSnapshotName(TableName table, String snapshotName) {
+    this.backupStatusMap.get(table).setSnapshotName(snapshotName);
+  }
+
+  public String getSnapshotName(TableName table) {
+    return this.backupStatusMap.get(table).getSnapshotName();
+  }
+
+  public List<String> getSnapshotNames() {
+    List<String> snapshotNames = new ArrayList<String>();
+    for (BackupStatus backupStatus : this.backupStatusMap.values()) {
+      snapshotNames.add(backupStatus.getSnapshotName());
+    }
+    return snapshotNames;
+  }
+
+  public Set<TableName> getTables() {
+    return this.backupStatusMap.keySet();
+  }
+
+  public List<TableName> getTableNames() {
+    return new ArrayList<TableName>(backupStatusMap.keySet());
+  }
+
+  public void addTables(TableName[] tables) {
+    for (TableName table : tables) {
+      BackupStatus backupStatus = new BackupStatus(table, this.targetRootDir, this.backupId);
+      this.backupStatusMap.put(table, backupStatus);
+    }
+  }
+
+  public void setTables(List<TableName> tables) {
+    this.backupStatusMap.clear();
+    for (TableName table : tables) {
+      BackupStatus backupStatus = new BackupStatus(table, this.targetRootDir, this.backupId);
+      this.backupStatusMap.put(table, backupStatus);
+    }
+  }
+
+  public String getTargetRootDir() {
+    return targetRootDir;
+  }
+
+  public void setHlogTargetDir(String hlogTargetDir) {
+    this.hlogTargetDir = hlogTargetDir;
+  }
+
+  public String getHLogTargetDir() {
+    return hlogTargetDir;
+  }
+
+  public List<String> getIncrBackupFileList() {
+    return incrBackupFileList;
+  }
+
+  public void setIncrBackupFileList(List<String> incrBackupFileList) {
+    this.incrBackupFileList = incrBackupFileList;
+  }
+
+  /**
+   * Set the new region server log timestamps after distributed log roll
+   * @param newTableSetTimestampMap table timestamp map
+   */
+  public void
+      setIncrTimestampMap(HashMap<TableName, HashMap<String, Long>> newTableSetTimestampMap) {
+    this.tableSetTimestampMap = newTableSetTimestampMap;
+  }
+
+  /**
+   * Get new region server log timestamps after distributed log roll
+   * @return new region server log timestamps
+   */
+  public HashMap<TableName, HashMap<String, Long>> getIncrTimestampMap() {
+    return this.tableSetTimestampMap;
+  }
+
+  public TableName getTableBySnapshot(String snapshotName) {
+    for (Entry<TableName, BackupStatus> entry : this.backupStatusMap.entrySet()) {
+      if (snapshotName.equals(entry.getValue().getSnapshotName())) {
+        return entry.getKey();
+      }
+    }
+    return null;
+  }
+
+  public BackupProtos.BackupInfo toProtosBackupInfo() {
+    BackupProtos.BackupInfo.Builder builder = BackupProtos.BackupInfo.newBuilder();
+    builder.setBackupId(getBackupId());
+    setBackupStatusMap(builder);
+    builder.setEndTs(getEndTs());
+    if (getFailedMsg() != null) {
+      builder.setFailedMessage(getFailedMsg());
+    }
+    if (getState() != null) {
+      builder.setState(BackupProtos.BackupInfo.BackupState.valueOf(getState().name()));
+    }
+    if (getPhase() != null) {
+      builder.setPhase(BackupProtos.BackupInfo.BackupPhase.valueOf(getPhase().name()));
+    }
+
+    builder.setProgress(getProgress());
+    builder.setStartTs(getStartTs());
+    builder.setTargetRootDir(getTargetRootDir());
+    builder.setType(BackupProtos.BackupType.valueOf(getType().name()));
+    builder.setWorkersNumber(workers);
+    builder.setBandwidth(bandwidth);
+    if (jobId != null) {
+      builder.setJobId(jobId);
+    }
+    return builder.build();
+  }
+
+  @Override
+  public int hashCode() {
+    int hash = 33 * type.hashCode() + (backupId != null ? backupId.hashCode() : 0);
+    if (targetRootDir != null) {
+      hash = 33 * hash + targetRootDir.hashCode();
+    }
+    hash = 33 * hash + state.hashCode();
+    hash = 33 * hash + phase.hashCode();
+    hash = 33 * hash + (int)(startTs ^ (startTs >>> 32));
+    hash = 33 * hash + (int)(endTs ^ (endTs >>> 32));
+    hash = 33 * hash + (int)(totalBytesCopied ^ (totalBytesCopied >>> 32));
+    if (hlogTargetDir != null) {
+      hash = 33 * hash + hlogTargetDir.hashCode();
+    }
+    if (jobId != null) {
+      hash = 33 * hash + jobId.hashCode();
+    }
+    return hash;
+  }
+  @Override
+  public boolean equals(Object obj) {
+    if (obj instanceof BackupInfo) {
+      BackupInfo other = (BackupInfo) obj;
+      try {
+        return Bytes.equals(toByteArray(), other.toByteArray());
+      } catch (IOException e) {
+        LOG.error(e);
+        return false;
+      }
+    } else {
+      return false;
+    }
+  }
+
+  public byte[] toByteArray() throws IOException {
+    return toProtosBackupInfo().toByteArray();
+  }
+
+  private void setBackupStatusMap(Builder builder) {
+    for (Entry<TableName, BackupStatus> entry : backupStatusMap.entrySet()) {
+      builder.addTableBackupStatus(entry.getValue().toProto());
+    }
+  }
+
+  public static BackupInfo fromByteArray(byte[] data) throws IOException {
+    return fromProto(BackupProtos.BackupInfo.parseFrom(data));
+  }
+
+  public static BackupInfo fromStream(final InputStream stream) throws IOException {
+    return fromProto(BackupProtos.BackupInfo.parseDelimitedFrom(stream));
+  }
+
+  public static BackupInfo fromProto(BackupProtos.BackupInfo proto) {
+    BackupInfo context = new BackupInfo();
+    context.setBackupId(proto.getBackupId());
+    context.setBackupStatusMap(toMap(proto.getTableBackupStatusList()));
+    context.setEndTs(proto.getEndTs());
+    if (proto.hasFailedMessage()) {
+      context.setFailedMsg(proto.getFailedMessage());
+    }
+    if (proto.hasState()) {
+      context.setState(BackupInfo.BackupState.valueOf(proto.getState().name()));
+    }
+
+    context.setHlogTargetDir(BackupClientUtil.getLogBackupDir(proto.getTargetRootDir(),
+      proto.getBackupId()));
+
+    if (proto.hasPhase()) {
+      context.setPhase(BackupPhase.valueOf(proto.getPhase().name()));
+    }
+    if (proto.hasProgress()) {
+      context.setProgress(proto.getProgress());
+    }
+    context.setStartTs(proto.getStartTs());
+    context.setTargetRootDir(proto.getTargetRootDir());
+    context.setType(BackupType.valueOf(proto.getType().name()));
+    context.setWorkers(proto.getWorkersNumber());
+    context.setBandwidth(proto.getBandwidth());
+    if (proto.hasJobId()) {
+      context.setJobId(proto.getJobId());
+    }
+    return context;
+  }
+
+  private static Map<TableName, BackupStatus> toMap(List<TableBackupStatus> list) {
+    HashMap<TableName, BackupStatus> map = new HashMap<>();
+    for (TableBackupStatus tbs : list) {
+      map.put(ProtobufUtil.toTableName(tbs.getTable()), BackupStatus.convert(tbs));
+    }
+    return map;
+  }
+
+  public String getShortDescription() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("ID             : " + backupId).append("\n");
+    sb.append("Type           : " + getType()).append("\n");
+    sb.append("Tables         : " + getTableListAsString()).append("\n");
+    sb.append("State          : " + getState()).append("\n");
+    Date date = null;
+    Calendar cal = Calendar.getInstance();
+    cal.setTimeInMillis(getStartTs());
+    date = cal.getTime();
+    sb.append("Start time     : " + date).append("\n");
+    if (state == BackupState.FAILED) {
+      sb.append("Failed message : " + getFailedMsg()).append("\n");
+    } else if (state == BackupState.RUNNING) {
+      sb.append("Phase          : " + getPhase()).append("\n");
+    } else if (state == BackupState.COMPLETE) {
+      cal = Calendar.getInstance();
+      cal.setTimeInMillis(getEndTs());
+      date = cal.getTime();
+      sb.append("End time       : " + date).append("\n");
+    }
+    sb.append("Progress       : " + getProgress()).append("\n");
+    return sb.toString();
+  }
+
+  public String getStatusAndProgressAsString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("id: ").append(getBackupId()).append(" state: ").append(getState())
+        .append(" progress: ").append(getProgress());
+    return sb.toString();
+  }
+
+  public String getTableListAsString() {
+    return StringUtils.join(backupStatusMap.keySet(), ",");
+  }
+
+  @Override
+  public int compareTo(BackupInfo o) {
+    Long thisTS = Long.valueOf(this.getBackupId().substring(this.getBackupId().lastIndexOf("_") + 1));
+    Long otherTS = Long.valueOf(o.getBackupId().substring(o.getBackupId().lastIndexOf("_") + 1));
+    return thisTS.compareTo(otherTS);
+  }
+
+}

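The protobuf plumbing above makes BackupInfo self-serializing, which is
presumably how the hbase:backup system table persists session state. A small
round-trip sketch (the backup id and paths are invented):

  import java.io.IOException;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.backup.BackupInfo;
  import org.apache.hadoop.hbase.backup.BackupType;

  public class BackupInfoRoundTrip {
    public static void main(String[] args) throws IOException {
      BackupInfo info = new BackupInfo("backup_1396650096738", BackupType.FULL,
          new TableName[] { TableName.valueOf("t1") },
          "hdfs://backup.hbase.org:9000/user/biadmin/backup1");
      info.setState(BackupInfo.BackupState.RUNNING);
      info.setProgress(42);
      byte[] bytes = info.toByteArray();  // BackupProtos.BackupInfo wire format
      BackupInfo copy = BackupInfo.fromByteArray(bytes);
      System.out.println(copy.getStatusAndProgressAsString());
    }
  }
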
http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java
new file mode 100644
index 0000000..ba8cf33
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * POJO class for backup request
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public final class BackupRequest {
+  private BackupType type;
+  private List<TableName> tableList;
+  private String targetRootDir;
+  private int workers = -1;
+  private long bandwidth = -1L;
+  private String backupSetName;
+
+  public BackupRequest() {
+  }
+
+  public BackupRequest setBackupType(BackupType type) {
+    this.type = type;
+    return this;
+  }
+  public BackupType getBackupType() {
+    return this.type;
+  }
+
+  public BackupRequest setTableList(List<TableName> tableList) {
+    this.tableList = tableList;
+    return this;
+  }
+  public List<TableName> getTableList() {
+    return this.tableList;
+  }
+
+  public BackupRequest setTargetRootDir(String targetRootDir) {
+    this.targetRootDir = targetRootDir;
+    return this;
+  }
+  public String getTargetRootDir() {
+    return this.targetRootDir;
+  }
+
+  public BackupRequest setWorkers(int workers) {
+    this.workers = workers;
+    return this;
+  }
+  public int getWorkers() {
+    return this.workers;
+  }
+
+  public BackupRequest setBandwidth(long bandwidth) {
+    this.bandwidth = bandwidth;
+    return this;
+  }
+  public long getBandwidth() {
+    return this.bandwidth;
+  }
+
+  public String getBackupSetName() {
+    return backupSetName;
+  }
+
+  public void setBackupSetName(String backupSetName) {
+    this.backupSetName = backupSetName;
+  }
+}

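The type, table-list, directory, worker and bandwidth setters above all return
this, so a request reads as a single chain that is then handed to
BackupAdmin.backupTables. A sketch with invented table names and target path:

  import java.util.Arrays;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.backup.BackupRequest;
  import org.apache.hadoop.hbase.backup.BackupType;

  public class BackupRequestSketch {
    static BackupRequest fullBackupRequest() {
      return new BackupRequest()
          .setBackupType(BackupType.FULL)
          .setTableList(Arrays.asList(TableName.valueOf("t1"), TableName.valueOf("t2")))
          .setTargetRootDir("hdfs://backup.hbase.org:9000/user/biadmin/backup1")
          .setWorkers(3)        // -1 lets the system decide
          .setBandwidth(100L);  // MB/s per worker; -1 means unlimited
    }
  }
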
http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
new file mode 100644
index 0000000..e22ae78
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * BackupRestoreConstants holds constants for HBase backup and restore operations
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public interface BackupRestoreConstants {
+  /**
+   * Backup/Restore constants
+   */
+  public final static String BACKUP_SYSTEM_TTL_KEY = "hbase.backup.system.ttl";
+  public final static int BACKUP_SYSTEM_TTL_DEFAULT = HConstants.FOREVER;
+  public final static String BACKUP_ENABLE_KEY = "hbase.backup.enable";
+  public final static boolean BACKUP_ENABLE_DEFAULT = false;
+  // Drivers option list
+  public static final String OPTION_OVERWRITE = "o";
+  public static final String OPTION_OVERWRITE_DESC =
+      "Overwrite data if any of the restore target tables exists";
+
+  public static final String OPTION_CHECK = "c";
+  public static final String OPTION_CHECK_DESC =
+      "Check restore sequence and dependencies only (does not execute the command)";
+
+  public static final String OPTION_SET = "s";
+  public static final String OPTION_SET_DESC = "Backup set name";
+  public static final String OPTION_SET_RESTORE_DESC =
+      "Backup set to restore, mutually exclusive with table list <table(s)>";
+
+  public static final String OPTION_DEBUG = "d";
+  public static final String OPTION_DEBUG_DESC = "Enable debug logging";
+
+  public static final String OPTION_TABLE = "t";
+  public static final String OPTION_TABLE_DESC = "Table name. If specified, only backup images"
+      + " that contain this table will be listed.";
+
+  public static final String OPTION_BANDWIDTH = "b";
+  public static final String OPTION_BANDWIDTH_DESC = "Bandwidth per task (MapReduce task) in MB/s";
+
+  public static final String OPTION_WORKERS = "w";
+  public static final String OPTION_WORKERS_DESC = "Number of parallel MapReduce tasks to execute";
+
+  public static final String OPTION_RECORD_NUMBER = "n";
+  public static final String OPTION_RECORD_NUMBER_DESC =
+      "Number of records of backup history. Default: 10";
+
+  public static final String OPTION_PATH = "p";
+  public static final String OPTION_PATH_DESC = "Backup destination root directory path";
+
+  public static final String OPTION_TABLE_MAPPING = "m";
+  public static final String OPTION_TABLE_MAPPING_DESC =
+      "A comma separated list of target tables. "
+          + "If specified, each table in <tables> must have a mapping";
+
+  // delimiter in tablename list in restore command
+  public static final String TABLENAME_DELIMITER_IN_COMMAND = ",";
+
+  public static final String CONF_STAGING_ROOT = "snapshot.export.staging.root";
+
+  public static final String BACKUPID_PREFIX = "backup_";
+
+  public static enum BackupCommand {
+    CREATE, CANCEL, DELETE, DESCRIBE, HISTORY, STATUS, CONVERT, MERGE, STOP, SHOW, HELP, PROGRESS,
+    SET, SET_ADD, SET_REMOVE, SET_DELETE, SET_DESCRIBE, SET_LIST
+  }
+
+}

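As BACKUP_ENABLE_DEFAULT implies, the feature is off unless explicitly enabled.
A sketch of the check that a component such as BackupManager.isBackupEnabled
presumably performs against these constants:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.backup.BackupRestoreConstants;

  public class BackupEnabledCheck {
    static boolean isBackupEnabled(Configuration conf) {
      // BACKUP_ENABLE_KEY is "hbase.backup.enable"; the default is false.
      return conf.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY,
          BackupRestoreConstants.BACKUP_ENABLE_DEFAULT);
    }

    public static void main(String[] args) {
      System.out.println(isBackupEnabled(HBaseConfiguration.create()));
    }
  }
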
http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreServerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreServerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreServerFactory.java
new file mode 100644
index 0000000..ac5bd9b
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreServerFactory.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupCopyTask;
+import org.apache.hadoop.hbase.backup.mapreduce.MapReduceRestoreTask;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.util.ReflectionUtils;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class BackupRestoreServerFactory {
+
+  public final static String HBASE_INCR_RESTORE_IMPL_CLASS = "hbase.incremental.restore.class";
+  public final static String HBASE_BACKUP_COPY_IMPL_CLASS = "hbase.backup.copy.class";
+
+  private BackupRestoreServerFactory() {
+    throw new AssertionError("Instantiating utility class...");
+  }
+
+  /**
+   * Gets backup restore task
+   * @param conf - configuration
+   * @return backup restore task instance
+   */
+  public static RestoreTask getRestoreTask(Configuration conf) {
+    Class<? extends RestoreTask> cls =
+        conf.getClass(HBASE_INCR_RESTORE_IMPL_CLASS, MapReduceRestoreTask.class,
+          RestoreTask.class);
+    RestoreTask service =  ReflectionUtils.newInstance(cls, conf);
+    service.setConf(conf);
+    return service;
+  }
+
+  /**
+   * Gets backup copy task
+   * @param conf - configuration
+   * @return backup copy task
+   */
+  public static BackupCopyTask getBackupCopyTask(Configuration conf) {
+    Class<? extends BackupCopyTask> cls =
+        conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS, MapReduceBackupCopyTask.class,
+          BackupCopyTask.class);
+    BackupCopyTask service = ReflectionUtils.newInstance(cls, conf);
+    service.setConf(conf);
+    return service;
+  }
+}

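Both hooks resolve through the Configuration, so an alternative implementation
(for example the hypothetical LoggingBackupCopyTask sketched earlier, assumed to
be on the classpath) can be swapped in without touching the factory:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.backup.BackupCopyTask;
  import org.apache.hadoop.hbase.backup.BackupRestoreServerFactory;
  import org.apache.hadoop.hbase.backup.LoggingBackupCopyTask; // hypothetical stub

  public class CustomCopyTaskSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      conf.setClass(BackupRestoreServerFactory.HBASE_BACKUP_COPY_IMPL_CLASS,
          LoggingBackupCopyTask.class, BackupCopyTask.class);
      // Returns a LoggingBackupCopyTask with the configuration already injected.
      BackupCopyTask task = BackupRestoreServerFactory.getBackupCopyTask(conf);
      System.out.println(task.getClass().getName());
    }
  }
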
http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java
new file mode 100644
index 0000000..fd856ec
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.Serializable;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.util.BackupClientUtil;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
+
+/**
+ * Backup status and related information encapsulated for a table.
+ * At the moment only the target directory and snapshot name are encapsulated here.
+ */
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupStatus implements Serializable {
+
+  private static final long serialVersionUID = -5968397963548535982L;
+
+  // table name for backup
+  private transient TableName table;
+
+  // target directory of the backup image for this table
+  private String targetDir;
+
+  // snapshot name for offline/online snapshot
+  private String snapshotName = null;
+
+  public BackupStatus() {
+
+  }
+
+  public BackupStatus(TableName table, String targetRootDir, String backupId) {
+    this.table = table;
+    this.targetDir = BackupClientUtil.getTableBackupDir(targetRootDir, backupId, table);
+  }
+
+  public String getSnapshotName() {
+    return snapshotName;
+  }
+
+  public void setSnapshotName(String snapshotName) {
+    this.snapshotName = snapshotName;
+  }
+
+  public String getTargetDir() {
+    return targetDir;
+  }
+
+  public TableName getTable() {
+    return table;
+  }
+
+  public void setTable(TableName table) {
+    this.table = table;
+  }
+
+  public void setTargetDir(String targetDir) {
+    this.targetDir = targetDir;
+  }
+
+  public static BackupStatus convert(BackupProtos.TableBackupStatus proto) {
+    BackupStatus bs = new BackupStatus();
+    bs.setTable(ProtobufUtil.toTableName(proto.getTable()));
+    bs.setTargetDir(proto.getTargetDir());
+    if (proto.hasSnapshot()) {
+      bs.setSnapshotName(proto.getSnapshot());
+    }
+    return bs;
+  }
+
+  public BackupProtos.TableBackupStatus toProto() {
+    BackupProtos.TableBackupStatus.Builder builder =
+        BackupProtos.TableBackupStatus.newBuilder();
+    if (snapshotName != null) {
+      builder.setSnapshot(snapshotName);
+    }
+    builder.setTable(ProtobufUtil.toProtoTableNameShaded(table));
+    builder.setTargetDir(targetDir);
+    return builder.build();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
new file mode 100644
index 0000000..9deb15b
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
@@ -0,0 +1,168 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupManifest;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+
+/**
+ * View of an on-disk backup image FileSystem.
+ * Provides the set of methods necessary to interact with the on-disk backup image data.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class HBackupFileSystem {
+  public static final Log LOG = LogFactory.getLog(HBackupFileSystem.class);
+
+  /**
+   * This is utility class.
+   */
+  private HBackupFileSystem() {
+  }
+
+  /**
+   * Given the backup root dir, backup id and the table name, return the backup image location,
+   * which is also where the backup manifest file is. The return value looks like:
+   * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/default/t1_dn/"
+   * @param backupRootDir backup root directory
+   * @param backupId  backup id
+   * @param tableName table name
+   * @return backupPath String for the particular table
+   */
+  public static String getTableBackupDir(String backupRootDir, String backupId,
+      TableName tableName) {
+    return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
+        + tableName.getNamespaceAsString() + Path.SEPARATOR
+        + tableName.getQualifierAsString() + Path.SEPARATOR;
+  }
+
+  /**
+   * Given the backup root dir, backup id and the table name, return the backup image location,
+   * which is also where the backup manifest file is. The return value looks like:
+   * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/default/t1_dn/"
+   * @param backupRootPath backup root path
+   * @param tableName table name
+   * @param backupId backup Id
+   * @return backupPath for the particular table
+   */
+  public static Path getTableBackupPath(TableName tableName, Path backupRootPath, String backupId) {
+    return new Path(getTableBackupDir(backupRootPath.toString(), backupId, tableName));
+  }
+
+  public static List<HRegionInfo> loadRegionInfos(TableName tableName,
+      Path backupRootPath, String backupId, Configuration conf)
+      throws IOException {
+    Path backupTableRoot = getTableBackupPath(tableName, backupRootPath, backupId);
+    FileSystem fs = backupTableRoot.getFileSystem(conf);
+    RemoteIterator<LocatedFileStatus> it = fs.listFiles(backupTableRoot, true);
+    List<HRegionInfo> infos = new ArrayList<HRegionInfo>();
+    while (it.hasNext()) {
+      LocatedFileStatus lfs = it.next();
+      if (lfs.isFile() && lfs.getPath().toString().endsWith(HRegionFileSystem.REGION_INFO_FILE)) {
+        Path regionDir = lfs.getPath().getParent();
+        HRegionInfo info = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
+        infos.add(info);
+      }
+    }
+
+    Collections.sort(infos);
+    return infos;
+  }
+
+  /**
+   * Given the backup root dir and the backup id, return the log file location for an incremental
+   * backup.
+   * @param backupRootDir backup root directory
+   * @param backupId backup id
+   * @return logBackupDir: ".../user/biadmin/backup1/backup_1396650096738/WALs"
+   */
+  public static String getLogBackupDir(String backupRootDir, String backupId) {
+    return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
+        + HConstants.HREGION_LOGDIR_NAME;
+  }
+
+  public static Path getLogBackupPath(String backupRootDir, String backupId) {
+    return new Path(getLogBackupDir(backupRootDir, backupId));
+  }
+
+  private static Path getManifestPath(TableName tableName, Configuration conf,
+      Path backupRootPath, String backupId) throws IOException {
+    Path manifestPath = new Path(getTableBackupPath(tableName, backupRootPath, backupId),
+      BackupManifest.MANIFEST_FILE_NAME);
+    FileSystem fs = backupRootPath.getFileSystem(conf);
+    if (!fs.exists(manifestPath)) {
+      // check log dir for incremental backup case
+      manifestPath =
+          new Path(getLogBackupDir(backupRootPath.toString(), backupId) + Path.SEPARATOR
+            + BackupManifest.MANIFEST_FILE_NAME);
+      if (!fs.exists(manifestPath)) {
+        String errorMsg =
+            "Could not find backup manifest " + BackupManifest.MANIFEST_FILE_NAME + " for " +
+                backupId + " in " + backupRootPath.toString() +
+                ". Did " + backupId + " correspond to previously taken backup ?";
+        throw new IOException(errorMsg);
+      }
+    }
+    return manifestPath;
+  }
+
+  public static BackupManifest getManifest(TableName tableName, Configuration conf,
+      Path backupRootPath, String backupId) throws IOException {
+    BackupManifest manifest = new BackupManifest(conf,
+      getManifestPath(tableName, conf, backupRootPath, backupId));
+    return manifest;
+  }
+
+  /**
+   * Check that the backup image exists and that there is a manifest file for each table in it.
+   * @param backupManifestMap if all the manifests are found, they are put into this map
+   * @param tableArray the tables involved
+   * @param conf configuration
+   * @param backupRootPath backup root path
+   * @param backupId backup id
+   * @throws IOException if a manifest cannot be found
+   */
+  public static void checkImageManifestExist(HashMap<TableName, BackupManifest> backupManifestMap,
+      TableName[] tableArray, Configuration conf,
+      Path backupRootPath, String backupId) throws IOException {
+    for (TableName tableName : tableArray) {
+      BackupManifest manifest = getManifest(tableName, conf, backupRootPath, backupId);
+      backupManifestMap.put(tableName, manifest);
+    }
+  }
+}
\ No newline at end of file
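
For orientation, a minimal usage sketch of the helpers above, assuming an existing
backup under "hdfs://backup.hbase.org:9000/user/biadmin/backup1" with id
backup_1396650096738; the table name and configuration values are illustrative only
(imports match those of HBackupFileSystem, plus org.apache.hadoop.hbase.HBaseConfiguration):

    Configuration conf = HBaseConfiguration.create();
    Path backupRootPath = new Path("hdfs://backup.hbase.org:9000/user/biadmin/backup1");
    String backupId = "backup_1396650096738";
    TableName table = TableName.valueOf("default", "t1_dn");

    // Image directory for one table: <root>/<backupId>/<namespace>/<qualifier>/
    Path tableBackupPath =
        HBackupFileSystem.getTableBackupPath(table, backupRootPath, backupId);

    // Manifest lookup; getManifestPath() falls back to <root>/<backupId>/WALs
    // for incremental images.
    BackupManifest manifest =
        HBackupFileSystem.getManifest(table, conf, backupRootPath, backupId);

    // Region metadata captured in the image, returned sorted.
    List<HRegionInfo> regions =
        HBackupFileSystem.loadRegionInfos(table, backupRootPath, backupId, conf);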

http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
new file mode 100644
index 0000000..336060f
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
@@ -0,0 +1,248 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.List;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupManager;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin;
+import org.apache.hadoop.hbase.backup.util.BackupServerUtil;
+import org.apache.hadoop.hbase.backup.util.LogUtils;
+import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.util.AbstractHBaseTool;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+
+public class RestoreDriver extends AbstractHBaseTool implements BackupRestoreConstants {
+
+  private static final Log LOG = LogFactory.getLog(RestoreDriver.class);
+  private CommandLine cmd;
+
+  private static final String USAGE_STRING =
+      "Usage: bin/hbase restore <backup_path> <backup_id> <table(s)> [options]\n"
+          + "  backup_path     Path to a backup destination root\n"
+          + "  backup_id       Backup image ID to restore\n"
+          + "  table(s)        Comma-separated list of tables to restore";
+
+  private static final String USAGE_FOOTER = "";
+
+  protected RestoreDriver() throws IOException {
+    init();
+  }
+
+  protected void init() throws IOException {
+    // disable irrelevant loggers so they do not clutter the command output
+    LogUtils.disableZkAndClientLoggers(LOG);
+  }
+
+  private int parseAndRun(String[] args) throws IOException {
+    // Check if backup is enabled
+    if (!BackupManager.isBackupEnabled(getConf())) {
+      System.err.println("Backup is not enabled. To enable backup, "+
+          "set \'hbase.backup.enabled'=true and restart "+
+          "the cluster");
+      return -1;
+    }
+    // enable debug logging
+    Logger backupClientLogger = Logger.getLogger("org.apache.hadoop.hbase.backup");
+    if (cmd.hasOption(OPTION_DEBUG)) {
+      backupClientLogger.setLevel(Level.DEBUG);
+    }
+
+    // whether to overwrite existing table(s), false by default
+    boolean overwrite = cmd.hasOption(OPTION_OVERWRITE);
+    if (overwrite) {
+      LOG.debug("Found -overwrite option in restore command, "
+          + "will overwrite existing table(s) in the restore target");
+    }
+
+    // whether to only check the dependencies, false by default
+    boolean check = cmd.hasOption(OPTION_CHECK);
+    if (check) {
+      LOG.debug("Found -check option in restore command, "
+          + "will check and verify the dependencies");
+    }
+
+    LOG.debug("Will automatically restore all the dependencies");
+
+    // parse main restore command options
+    String[] remainArgs = cmd.getArgs();
+    if ((!cmd.hasOption(OPTION_SET) && remainArgs.length < 3)
+        || (cmd.hasOption(OPTION_SET) && remainArgs.length < 2)) {
+      printToolUsage();
+      return -1;
+    }
+
+    String backupRootDir = remainArgs[0];
+    String backupId = remainArgs[1];
+    String tables = null;
+    String tableMapping =
+        cmd.hasOption(OPTION_TABLE_MAPPING) ? cmd.getOptionValue(OPTION_TABLE_MAPPING) : null;
+    try (final Connection conn = ConnectionFactory.createConnection(conf);
+        BackupAdmin client = new HBaseBackupAdmin(conn)) {
+      // Check backup set
+      if (cmd.hasOption(OPTION_SET)) {
+        String setName = cmd.getOptionValue(OPTION_SET);
+        try {
+          tables = getTablesForSet(conn, setName, conf);
+        } catch (IOException e) {
+          System.out.println("ERROR: " + e.getMessage() + " for setName=" + setName);
+          printToolUsage();
+          return -2;
+        }
+        if (tables == null) {
+          System.out.println("ERROR: Backup set '" + setName
+              + "' is either empty or does not exist");
+          printToolUsage();
+          return -3;
+        }
+      } else {
+        tables = remainArgs[2];
+      }
+
+      TableName[] sTableArray = BackupServerUtil.parseTableNames(tables);
+      TableName[] tTableArray = BackupServerUtil.parseTableNames(tableMapping);
+
+      if (sTableArray != null && tTableArray != null
+          && (sTableArray.length != tTableArray.length)) {
+        System.out.println("ERROR: table mapping mismatch: " + tables + " : " + tableMapping);
+        printToolUsage();
+        return -4;
+      }
+
+      client.restore(RestoreServerUtil.createRestoreRequest(backupRootDir, backupId, check,
+        sTableArray, tTableArray, overwrite));
+    } catch (Exception e) {
+      e.printStackTrace();
+      return -5;
+    }
+    return 0;
+  }
+
+  private String getTablesForSet(Connection conn, String name, Configuration conf)
+      throws IOException {
+    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
+      List<TableName> tables = table.describeBackupSet(name);
+      if (tables == null) return null;
+      return StringUtils.join(tables, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND);
+    }
+  }
+
+  @Override
+  protected void addOptions() {
+    // define supported options
+    addOptNoArg(OPTION_OVERWRITE, OPTION_OVERWRITE_DESC);
+    addOptNoArg(OPTION_CHECK, OPTION_CHECK_DESC);
+    addOptNoArg(OPTION_DEBUG, OPTION_DEBUG_DESC);
+    addOptWithArg(OPTION_SET, OPTION_SET_RESTORE_DESC);
+    addOptWithArg(OPTION_TABLE_MAPPING, OPTION_TABLE_MAPPING_DESC);
+  }
+
+  @Override
+  protected void processOptions(CommandLine cmd) {
+    this.cmd = cmd;
+  }
+
+  @Override
+  protected int doWork() throws Exception {
+    return parseAndRun(cmd.getArgs());
+  }
+
+  public static void main(String[] args) throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    Path hbasedir = FSUtils.getRootDir(conf);
+    URI defaultFs = hbasedir.getFileSystem(conf).getUri();
+    FSUtils.setFsDefault(conf, new Path(defaultFs));
+    int ret = ToolRunner.run(conf, new RestoreDriver(), args);
+    System.exit(ret);
+  }
+
+  @Override
+  public int run(String[] args) throws IOException {
+    if (conf == null) {
+      LOG.error("Tool configuration is not initialized");
+      throw new NullPointerException("conf");
+    }
+
+    CommandLine cmd;
+    try {
+      // parse the command line arguments
+      cmd = parseArgs(args);
+      cmdLineArgs = args;
+    } catch (Exception e) {
+      System.out.println("Error when parsing command-line arguments: " + e.getMessage());
+      printToolUsage();
+      return EXIT_FAILURE;
+    }
+
+    if (!sanityCheckOptions(cmd) || cmd.hasOption(SHORT_HELP_OPTION)
+        || cmd.hasOption(LONG_HELP_OPTION)) {
+      printToolUsage();
+      return EXIT_FAILURE;
+    }
+
+    processOptions(cmd);
+
+    int ret = EXIT_FAILURE;
+    try {
+      ret = doWork();
+    } catch (Exception e) {
+      LOG.error("Error running command-line tool", e);
+      return EXIT_FAILURE;
+    }
+    return ret;
+  }
+
+  @Override
+  protected boolean sanityCheckOptions(CommandLine cmd) {
+    boolean success = true;
+    for (String reqOpt : requiredOptions) {
+      if (!cmd.hasOption(reqOpt)) {
+        System.out.println("Required option -" + reqOpt + " is missing");
+        success = false;
+      }
+    }
+    return success;
+  }
+
+  protected void printToolUsage() throws IOException {
+    System.out.println(USAGE_STRING);
+    HelpFormatter helpFormatter = new HelpFormatter();
+    helpFormatter.setLeftPadding(2);
+    helpFormatter.setDescPadding(8);
+    helpFormatter.setWidth(100);
+    helpFormatter.setSyntaxPrefix("Options:");
+    helpFormatter.printHelp(" ", null, options, USAGE_FOOTER);
+  }
+}
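
For reference, a typical invocation of this driver from the shell, following the
USAGE_STRING above. The path, backup id and table names are examples; -overwrite is
the literal spelling suggested by the driver's debug message, while the actual
OPTION_* literals are defined in BackupRestoreConstants, which is not part of this hunk:

    bin/hbase restore hdfs://backup.hbase.org:9000/user/biadmin/backup1 \
        backup_1396650096738 default:t1_dn,default:t2_dn -overwrite

When the backup-set option (OPTION_SET) is given, the third positional argument is
dropped and the table list is resolved from the named set via
BackupSystemTable.describeBackupSet(), as getTablesForSet() above shows.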

http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java
new file mode 100644
index 0000000..7490d20
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * POJO class for a restore request. Each setter returns the request instance
+ * to allow call chaining.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class RestoreRequest {
+
+  private String backupRootDir;
+  private String backupId;
+  private boolean check = false;
+  private TableName[] fromTables;
+  private TableName[] toTables;
+  private boolean overwrite = false;
+
+  public RestoreRequest() {
+  }
+
+  public String getBackupRootDir() {
+    return backupRootDir;
+  }
+
+  public RestoreRequest setBackupRootDir(String backupRootDir) {
+    this.backupRootDir = backupRootDir;
+    return this;
+  }
+
+  public String getBackupId() {
+    return backupId;
+  }
+
+  public RestoreRequest setBackupId(String backupId) {
+    this.backupId = backupId;
+    return this;
+  }
+
+  public boolean isCheck() {
+    return check;
+  }
+
+  public RestoreRequest setCheck(boolean check) {
+    this.check = check;
+    return this;
+  }
+
+  public TableName[] getFromTables() {
+    return fromTables;
+  }
+
+  public RestoreRequest setFromTables(TableName[] fromTables) {
+    this.fromTables = fromTables;
+    return this;
+  }
+
+  public TableName[] getToTables() {
+    return toTables;
+  }
+
+  public RestoreRequest setToTables(TableName[] toTables) {
+    this.toTables = toTables;
+    return this;
+  }
+
+  public boolean isOverwrite() {
+    return overwrite;
+  }
+
+  public RestoreRequest setOverwrite(boolean overwrite) {
+    this.overwrite = overwrite;
+    return this;
+  }
+}
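
A short sketch of building a request with the fluent setters above; the values are
illustrative only, and RestoreDriver itself builds its request through
RestoreServerUtil.createRestoreRequest() rather than by hand:

    RestoreRequest request = new RestoreRequest()
        .setBackupRootDir("hdfs://backup.hbase.org:9000/user/biadmin/backup1")
        .setBackupId("backup_1396650096738")
        .setFromTables(new TableName[] { TableName.valueOf("default", "t1_dn") })
        .setToTables(new TableName[] { TableName.valueOf("default", "t1_dn_restored") })
        .setCheck(false)     // restore for real rather than only checking dependencies
        .setOverwrite(true); // replace the target tables if they already exist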

http://git-wip-us.apache.org/repos/asf/hbase/blob/2725fb25/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreTask.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreTask.java
new file mode 100644
index 0000000..bd43990
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreTask.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Backup restore service interface.
+ * A concrete implementation is provided by the backup provider.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface RestoreTask extends Configurable {
+
+  /**
+   * Run restore operation
+   * @param dirPaths array of paths to WAL directories
+   * @param fromTables tables to restore from
+   * @param toTables tables to restore to
+   * @param fullBackupRestore true if this is a full backup restore
+   * @throws IOException if the restore operation fails
+   */
+  void run(Path[] dirPaths, TableName[] fromTables,
+      TableName[] toTables, boolean fullBackupRestore)
+    throws IOException;
+}
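
To illustrate the contract, a hypothetical no-op implementation; the class name and
body are illustrative only, and a real provider would replay the WAL/HFile data found
under dirPaths into the target tables instead of just logging:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.TableName;

    // Hypothetical stub, not part of this patch.
    public class LoggingRestoreTask implements RestoreTask {
      private Configuration conf;

      @Override
      public void setConf(Configuration conf) {
        this.conf = conf;
      }

      @Override
      public Configuration getConf() {
        return conf;
      }

      @Override
      public void run(Path[] dirPaths, TableName[] fromTables, TableName[] toTables,
          boolean fullBackupRestore) throws IOException {
        // Stub body: only records what was requested; a real implementation
        // would restore the data under dirPaths into each target table.
        for (int i = 0; i < fromTables.length; i++) {
          System.out.println("restore " + fromTables[i] + " -> " + toTables[i]
              + (fullBackupRestore ? " (full)" : " (incremental)"));
        }
      }
    }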