Posted to commits@hbase.apache.org by sy...@apache.org on 2016/01/04 17:56:50 UTC

[05/29] hbase git commit: HBASE-14030 HBase Backup/Restore Phase 1 (Vladimir Rodionov)

http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHandler.java
new file mode 100644
index 0000000..f764f18
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHandler.java
@@ -0,0 +1,744 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.concurrent.Callable;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupManifest.BackupImage;
+import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+/**
+ * A handler that carries out a backup request end-to-end and tracks its progress.
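+ * <p>
+ * The handler runs as a {@link Callable}; the sketch below mirrors how
+ * {@link BackupManager#dispatchRequest} submits it:
+ * <pre>
+ *   BackupHandler handler = new BackupHandler(backupContext, backupManager, conf);
+ *   Future&lt;Object&gt; done = pool.submit(handler);
+ *   done.get(); // blocks until the backup session completes or fails
+ * </pre>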
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupHandler implements Callable<Object> {
+  private static final Log LOG = LogFactory.getLog(BackupHandler.class);
+
+  // backup phase
+  // for the overall backup (with a table list, some tables may go online while others go offline)
+  protected static enum BACKUPPHASE {
+    REQUEST, SNAPSHOT, PREPARE_INCREMENTAL, SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST;
+  }
+
+  // backup status flag
+  protected static enum BACKUPSTATUS {
+    WAITING, ONGOING, COMPLETE, FAILED, CANCELLED;
+  }
+
+  protected BackupContext backupContext;
+  private BackupManager backupManager;
+  private Configuration conf;
+
+  public BackupHandler(BackupContext backupContext, 
+      BackupManager backupManager, Configuration conf) {
+    this.backupContext = backupContext;
+    this.backupManager = backupManager;
+    this.conf = conf;
+  }
+
+  public BackupContext getBackupContext() {
+    return backupContext;
+  }
+
+  @Override
+  public Object call() throws Exception {
+    try {
+      // overall backup begin
+      this.beginBackup(backupContext);
+      HashMap<String, String> newTimestamps = null;
+      boolean fromExistingSnapshot = false; // to be supported by a future jira
+      // handle full or incremental backup for table or table list
+      if (backupContext.getType().equals(BackupRestoreConstants.BACKUP_TYPE_FULL)) {
+        String savedStartCode = null;
+        HBaseAdmin hbadmin = null;
+        Connection conn = null;
+        boolean firstBackup = false;
+        // take a snapshot for a full table backup; when backing up from an existing snapshot,
+        // the snapshot step is skipped
+        if (fromExistingSnapshot) {
+          LOG.error("Backup from an existing snapshot is not supported yet; "
+              + "this feature will be added in a future jira");
+          throw new RuntimeException("Backup from existing snapshot is not supported");
+        } else {
+          try {
+            savedStartCode = backupManager.readBackupStartCode();
+            firstBackup = savedStartCode == null;
+            if (firstBackup) {
+              // This is our first backup. Let's put some marker on ZK so that we can hold the logs
+              // while we do the backup.
+              backupManager.writeBackupStartCode("0");
+            }
+            // We roll log here before we do the snapshot. It is possible there is duplicate data
+            // in the log that is already in the snapshot. But if we do it after the snapshot, we
+            // could have data loss.
+            // A better approach is to do the roll log on each RS in the same global procedure as
+            // the snapshot.
+            LOG.info("Execute roll log procedure for full backup ...");
+            conn = ConnectionFactory.createConnection(conf);
+            hbadmin = (HBaseAdmin) conn.getAdmin();
+            hbadmin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
+              LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, new HashMap<String, String>());
+            newTimestamps = backupManager.readRegionServerLastLogRollResult();
+            if (firstBackup) {
+              // Updates registered log files
+              // We record ALL old WAL files as registered, because
+              // this is a first full backup in the system and these
+              // files are not needed for next incremental backup
+              List<String> logFiles = BackupUtil.getWALFilesOlderThan(conf, newTimestamps);
+              backupManager.recordWALFiles(logFiles);
+            }
+            this.snapshotForFullBackup(backupContext);
+          } catch (BackupException e) {
+            // fail the overall backup and return
+            this.failBackup(backupContext, e, "Unexpected BackupException : ");
+            return null;
+          } finally {
+            if (hbadmin != null) {
+              hbadmin.close();
+            }
+            if (conn != null) {
+              conn.close();
+            }
+          }
+        }
+
+        // update the (currently hard-coded) progress to mark the snapshot step done
+        this.updateProgress("10.0%", 0);
+        // do snapshot copy
+        try {
+          this.snapshotCopy(backupContext);
+        } catch (Exception e) {
+          // fail the overall backup and return
+          this.failBackup(backupContext, e, "Unexpected BackupException : ");
+          return null;
+        }
+        // Updates incremental backup table set
+        backupManager.addIncrementalBackupTableSet(backupContext.getTables());
+
+      } else if (backupContext.getType().equals(BackupRestoreConstants.BACKUP_TYPE_INCR)) {
+        LOG.info("For incremental backup, current table set is "
+            + backupManager.getIncrementalBackupTableSet());
+        // do incremental table backup preparation
+        backupContext.setPhase(BACKUPPHASE.PREPARE_INCREMENTAL);
+        // take no action if the backup has been cancelled
+        if (backupContext.isCancelled()) {
+          return null;
+        }
+        try {
+          IncrementalBackupManager incrBackupManager = new IncrementalBackupManager(backupManager);
+
+          newTimestamps = incrBackupManager.getIncrBackupLogFileList(backupContext);
+        } catch (Exception e) {
+          // fail the overall backup and return
+          this.failBackup(backupContext, e, "Unexpected Exception : ");
+          return null;
+        }
+        // update the (currently hard-coded) progress to mark incremental preparation done
+        this.updateProgress("10.0%", 0);
+
+        // do incremental copy
+        try {
+          // copy out the table and region info files for each table
+          BackupUtil.copyTableRegionInfo(backupContext, conf);
+          this.incrementalCopy(backupContext);
+          // Save list of WAL files copied
+          backupManager.recordWALFiles(backupContext.getIncrBackupFileList());
+        } catch (Exception e) {
+          // fail the overall backup and return
+          this.failBackup(backupContext, e, "Unexpected exception doing incremental copy : ");
+          return null;
+        }
+
+      } else {
+        LOG.error("Unsupport backup type: " + backupContext.getType());
+      }
+
+      // set overall backup status: complete. After this checkpoint the backup is treated as
+      // finished, even if a cancel request arrives later
+      backupContext.setFlag(BACKUPSTATUS.COMPLETE);
+
+      if (!fromExistingSnapshot) {
+        if (backupContext.getType().equals(BackupRestoreConstants.BACKUP_TYPE_INCR)) {
+          // Set the previousTimestampMap which is before this current log roll to the manifest.
+          HashMap<String, HashMap<String, String>> previousTimestampMap =
+              backupManager.readLogTimestampMap();
+          backupContext.setIncrTimestampMap(previousTimestampMap);
+        }
+        // The table list in backupContext is good for both full backup and incremental backup.
+        // For incremental backup, it contains the incremental backup table set.
+
+        backupManager.writeRegionServerLogTimestamp(backupContext.getTables(), newTimestamps);
+
+        HashMap<String, HashMap<String, String>> newTableSetTimestampMap =
+            backupManager.readLogTimestampMap();
+
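+        // the new start code is the minimum of the most recent log-roll timestamps
+        // across all region servers; WALs older than it are no longer needed for backup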
+        String newStartCode =
+            BackupUtil.getMinValue(BackupUtil.getRSLogTimestampMins(newTableSetTimestampMap));
+        backupManager.writeBackupStartCode(newStartCode);
+      }
+
+      // backup complete
+      this.completeBackup(backupContext);
+    } catch (Exception e) {
+      // an exception may occur while completing the backup (#completeBackup(backupContext)) or
+      // during any earlier step; in either case, fail the backup
+      this.failBackup(backupContext, e, "Error caught during backup progress: ");
+    }
+    return null;
+  }
+
+  /**
+   * Begin the overall backup.
+   * @param backupContext backup context
+   * @throws IOException exception
+   */
+  private void beginBackup(BackupContext backupContext) throws IOException {
+
+    // set the start timestamp of the overall backup
+    long startTs = EnvironmentEdgeManager.currentTime();
+    backupContext.setStartTs(startTs);
+    // set overall backup status: ongoing
+    backupContext.setFlag(BACKUPSTATUS.ONGOING);
+    LOG.info("Backup " + backupContext.getBackupId() + " starts at " + startTs + ".");
+
+    backupManager.updateBackupStatus(backupContext);
+    LOG.debug("Backup session " + backupContext.getBackupId() + " has been started.");
+
+  }
+
+  /**
+   * Snapshot for full table backup.
+   * @param backupContext backup context
+   * @throws IOException exception
+   */
+  private void snapshotForFullBackup(BackupContext backupContext) throws IOException {
+
+    LOG.info("HBase snapshot full backup for " + backupContext.getBackupId());
+
+    // take no action if the backup has been cancelled
+    if (backupContext.isCancelled()) {
+      return;
+    }
+
+    HBaseAdmin hbadmin = null;
+    Connection conn = null;
+
+    // currently we take an HBase snapshot for each table in the table list, one by one
+    for (String table : backupContext.getTables()) {
+
+      // avoid action if it has been cancelled
+      if (backupContext.isCancelled()) {
+        return;
+      }
+
+      HBaseProtos.SnapshotDescription backupSnapshot;
+      try {
+        // wrap a SnapshotDescription for offline/online snapshot
+        backupSnapshot = this.wrapSnapshotDescription(table);
+
+        // set the snapshot name in BackupStatus of this table
+        backupContext.setSnapshotName(table, backupSnapshot.getName());
+
+        // Kick off snapshot for backup
+        conn = ConnectionFactory.createConnection(conf);
+        hbadmin = (HBaseAdmin) conn.getAdmin();
+        hbadmin.snapshot(backupSnapshot);
+
+        if (!LOG.isDebugEnabled()) {
+          // In DEBUG mode we log message already.
+          // This is not to duplicate that message.
+          LOG.info("Snapshot has been launched, waiting to finish ...");
+        }
+
+      } catch (Exception e) {
+        LOG.error("Snapshot failed to create " + getMessage(e));
+
+        // currently the overall backup fails if any table in the list fails, so rethrow
+        // the exception to fail the whole backup
+        throw new BackupException("Backup snapshot failed on table " + table, e);
+      } finally {
+        if (hbadmin != null) {
+          hbadmin.close();
+        }
+        if (conn != null) {
+          conn.close();
+        }
+      }
+
+      // the snapshot name was already recorded in BackupStatus before taking the snapshot,
+      // so that a failed snapshot can still be cleaned up by deleteSnapshot()
+
+    } // for each table in the backup table list
+
+  }
+
+  /**
+   * Fail the overall backup.
+   * @param backupContext backup context
+   * @param e exception
+   * @throws Exception exception
+   */
+  private void failBackup(BackupContext backupContext, Exception e, String msg) throws Exception {
+
+    LOG.error(msg + getMessage(e));
+
+    // If this is a cancel exception, then cleanup has already been done.
+
+    if (this.backupContext.getFlag().equals(BACKUPSTATUS.CANCELLED)) {
+      return;
+    }
+
+    // set the failure timestamp of the overall backup
+    backupContext.setEndTs(EnvironmentEdgeManager.currentTime());
+
+    // set failure message
+    backupContext.setFailedMsg(e.getMessage());
+
+    // set overall backup status: failed
+    backupContext.setFlag(BACKUPSTATUS.FAILED);
+
+    // compose the backup failed data
+    String backupFailedData =
+        "BackupId=" + backupContext.getBackupId() + ",startts=" + backupContext.getStartTs()
+        + ",failedts=" + backupContext.getEndTs() + ",failedphase=" + backupContext.getPhase()
+        + ",failedmessage=" + backupContext.getFailedMsg();
+    LOG.error(backupFailedData);
+
+    backupManager.updateBackupStatus(backupContext);
+
+    // if full backup, then delete HBase snapshots if there already have snapshots taken
+    // and also clean up export snapshot log files if exist
+    if (backupContext.getType().equals(BackupRestoreConstants.BACKUP_TYPE_FULL)) {
+      if (!backupContext.fromExistingSnapshot()) {
+        this.deleteSnapshot(backupContext);
+      }
+      this.cleanupExportSnapshotLog();
+    } /*
+     * else { // support incremental backup code in future jira // TODO. See HBASE-14124 }
+     */
+
+    // clean up the uncompleted data at target directory if the ongoing backup has already entered
+    // the copy phase
+    // For incremental backup, DistCp logs will be cleaned with the targetDir.
+    this.cleanupTargetDir();
+
+    LOG.info("Backup " + backupContext.getBackupId() + " failed.");
+  }
+
+  /**
+   * Update the ongoing backup session record with new progress.
+   * @param newProgress progress
+   * @param bytesCopied bytes copied
+   * @throws IOException exception
+   */
+  public void updateProgress(String newProgress, long bytesCopied) throws IOException {
+
+    // compose the new backup progress data, using fake number for now
+    String backupProgressData = newProgress;
+
+    backupContext.setProgress(newProgress);
+    backupManager.updateBackupStatus(backupContext);
+    LOG.debug("Backup progress data \"" + backupProgressData
+      + "\" has been updated to hbase:backup for " + backupContext.getBackupId());
+  }
+
+  /**
+   * Complete the overall backup.
+   * @param backupContext backup context
+   * @throws Exception exception
+   */
+  private void completeBackup(BackupContext backupContext) throws Exception {
+
+    // set the complete timestamp of the overall backup
+    backupContext.setEndTs(EnvironmentEdgeManager.currentTime());
+    // set overall backup status: complete
+    backupContext.setFlag(BACKUPSTATUS.COMPLETE);
+    // add and store the manifest for the backup
+    this.addManifest(backupContext);
+
+    // after major steps done and manifest persisted, do convert if needed for incremental backup
+    /* in-flight conversion code here, to be provided by a future jira */
+    LOG.debug("in-flight conversion code here, to be provided by a future jira");
+
+    // compose the backup complete data
+    String backupCompleteData =
+        this.obtainBackupMetaDataStr(backupContext) + ",startts=" + backupContext.getStartTs()
+        + ",completets=" + backupContext.getEndTs() + ",bytescopied="
+        + backupContext.getTotalBytesCopied();
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Backup " + backupContext.getBackupId() + " finished: " + backupCompleteData);
+    }
+    backupManager.updateBackupStatus(backupContext);
+
+    // when full backup is done:
+    // - delete HBase snapshot
+    // - clean up directories with prefix "exportSnapshot-", which are generated when exporting
+    // snapshots
+    if (backupContext.getType().equals(BackupRestoreConstants.BACKUP_TYPE_FULL)) {
+      if (!backupContext.fromExistingSnapshot()) {
+        this.deleteSnapshot(backupContext);
+      }
+      this.cleanupExportSnapshotLog();
+    } else if (backupContext.getType().equals(BackupRestoreConstants.BACKUP_TYPE_INCR)) {
+      this.cleanupDistCpLog();
+    } else {
+      LOG.error(" other backup types have not been implemented yet");
+    }
+
+    LOG.info("Backup " + backupContext.getBackupId() + " completed.");
+  }
+
+  /**
+   * Get backup request meta data dir as string.
+   * @param backupContext backup context
+   * @return meta data dir
+   */
+  private String obtainBackupMetaDataStr(BackupContext backupContext) {
+    StringBuilder sb = new StringBuilder();
+    sb.append("type=" + backupContext.getType() + ",tablelist=");
+    for (String table : backupContext.getTables()) {
+      sb.append(table + ";");
+    }
+    if (sb.lastIndexOf(";") > 0) {
+      sb.delete(sb.lastIndexOf(";"), sb.lastIndexOf(";") + 1);
+    }
+    sb.append(",targetRootDir=" + backupContext.getTargetRootDir());
+    if (backupContext.fromExistingSnapshot()) {
+      sb.append(",snapshot=" + backupContext.getExistingSnapshot());
+    }
+    return sb.toString();
+  }
+
+  /**
+   * Do snapshot copy.
+   * @param backupContext backup context
+   * @throws Exception exception
+   */
+  private void snapshotCopy(BackupContext backupContext) throws Exception {
+
+    LOG.info("Snapshot copy is starting.");
+
+    // set overall backup phase: snapshot_copy
+    backupContext.setPhase(BACKUPPHASE.SNAPSHOTCOPY);
+
+    // take no action if the backup has been cancelled
+    if (backupContext.isCancelled()) {
+      return;
+    }
+
+    // call ExportSnapshot to copy files based on hbase snapshot for backup
+    // ExportSnapshot only supports exporting a single snapshot, so loop over the tables
+    BackupCopyService copyService = BackupRestoreServiceFactory.getBackupCopyService(conf);
+
+    // number of snapshots matches number of tables
+    float numOfSnapshots = backupContext.getSnapshotNames().size();
+
+    LOG.debug("There are " + (int) numOfSnapshots + " snapshots to be copied.");
+
+    for (String table : backupContext.getTables()) {
+      // Currently each table snapshot counts as one equal-weight sub-task; in the future the
+      // percentage could be weighted by the actual file sizes.
+      // TODO this below
+      // backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots);
+      int res = 0;
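+      // the arguments mirror the ExportSnapshot command line:
+      //   -snapshot <snapshot name> -copy-to <target dir>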
+      String[] args = new String[4];
+      args[0] = "-snapshot";
+      args[1] = backupContext.getSnapshotName(table);
+      args[2] = "-copy-to";
+      args[3] = backupContext.getBackupStatus(table).getTargetDir();
+
+      LOG.debug("Copy snapshot " + args[1] + " to " + args[3]);
+      res = copyService.copy(this, conf, BackupCopyService.Type.FULL, args);
+      // if one snapshot export fails, do not continue with the remaining snapshots
+      if (res != 0) {
+
+        LOG.error("Exporting Snapshot " + args[1] + " failed with return code: " + res + ".");
+
+        throw new IOException("Failed of exporting snapshot " + args[1] + " to " + args[3]
+            + " with reason code " + res);
+      }
+
+      LOG.info("Snapshot copy " + args[1] + " finished.");
+    }
+  }
+
+  /**
+   * Wrap a SnapshotDescription for a target table.
+   * @param table table
+   * @return a SnapshotDescription especially for backup.
+   */
+  private SnapshotDescription wrapSnapshotDescription(String table) {
+    // Mock a SnapshotDescription from backupContext to call the SnapshotManager function.
+    // Name it in the format "snapshot_<timestamp>_<namespace>_<table>"
+    HBaseProtos.SnapshotDescription.Builder builder = HBaseProtos.SnapshotDescription.newBuilder();
+    builder.setTable(table);
+    TableName tableName = TableName.valueOf(table);
+    builder.setName("snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_"
+        + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString());
+    HBaseProtos.SnapshotDescription backupSnapshot = builder.build();
+
+    LOG.debug("Wrapped a SnapshotDescription " + backupSnapshot.getName()
+      + " from backupContext to request snapshot for backup.");
+
+    return backupSnapshot;
+  }
+
+  /**
+   * Delete HBase snapshot for backup.
+   * @param backupCtx backup context
+   * @throws Exception exception
+   */
+  private void deleteSnapshot(BackupContext backupCtx) throws IOException {
+
+    LOG.debug("Trying to delete snapshot for full backup.");
+    Connection conn = null;
+    Admin admin = null;
+    try {
+      conn = ConnectionFactory.createConnection(conf);
+      admin = conn.getAdmin();
+      for (String snapshotName : backupCtx.getSnapshotNames()) {
+        if (snapshotName == null) {
+          continue;
+        }
+        LOG.debug("Trying to delete snapshot: " + snapshotName);
+        admin.deleteSnapshot(snapshotName);
+        LOG.debug("Deleting the snapshot " + snapshotName + " for backup "
+            + backupCtx.getBackupId() + " succeeded.");
+      }
+    } finally {
+      if (admin != null) {
+        admin.close();
+      }
+      if (conn != null) {
+        conn.close();
+      }
+    }
+  }
+
+  /**
+   * Clean up directories with prefix "exportSnapshot-", which are generated when exporting
+   * snapshots.
+   * @throws IOException exception
+   */
+  private void cleanupExportSnapshotLog() throws IOException {
+    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
+    Path stagingDir =
+        new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory()
+          .toString()));
+    FileStatus[] files = FSUtils.listStatus(fs, stagingDir);
+    if (files == null) {
+      return;
+    }
+    for (FileStatus file : files) {
+      if (file.getPath().getName().startsWith("exportSnapshot-")) {
+        LOG.debug("Delete log files of exporting snapshot: " + file.getPath().getName());
+        if (!FSUtils.delete(fs, file.getPath(), true)) {
+          LOG.warn("Cannot delete " + file.getPath());
+        }
+      }
+    }
+  }
+
+  /**
+   * Clean up directories with prefix "_distcp_logs-", which are generated when DistCp copying
+   * hlogs.
+   * @throws IOException exception
+   */
+  private void cleanupDistCpLog() throws IOException {
+    Path rootPath = new Path(backupContext.getHLogTargetDir()).getParent();
+    FileSystem fs = FileSystem.get(rootPath.toUri(), conf);
+    FileStatus[] files = FSUtils.listStatus(fs, rootPath);
+    if (files == null) {
+      return;
+    }
+    for (FileStatus file : files) {
+      if (file.getPath().getName().startsWith("_distcp_logs")) {
+        LOG.debug("Delete log files of DistCp: " + file.getPath().getName());
+        FSUtils.delete(fs, file.getPath(), true);
+      }
+    }
+  }
+
+  /**
+   * Clean up the uncompleted data at target directory if the ongoing backup has already entered the
+   * copy phase.
+   */
+  private void cleanupTargetDir() {
+    try {
+      // clean up the uncompleted data at target directory if the ongoing backup has already entered
+      // the copy phase
+      LOG.debug("Trying to cleanup up target dir. Current backup phase: "
+          + backupContext.getPhase());
+      if (backupContext.getPhase().equals(BACKUPPHASE.SNAPSHOTCOPY)
+          || backupContext.getPhase().equals(BACKUPPHASE.INCREMENTAL_COPY)
+          || backupContext.getPhase().equals(BACKUPPHASE.STORE_MANIFEST)) {
+        FileSystem outputFs =
+            FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf);
+
+        // now treat one backup as a transaction, clean up data that has been partially copied at
+        // table level
+        for (String table : backupContext.getTables()) {
+          Path targetDirPath =
+              new Path(HBackupFileSystem.getTableBackupDir(backupContext.getTargetRootDir(),
+                backupContext.getBackupId(), table));
+          if (outputFs.delete(targetDirPath, true)) {
+            LOG.info("Cleaning up uncompleted backup data at " + targetDirPath.toString()
+              + " done.");
+          } else {
+            LOG.info("No data has been copied to " + targetDirPath.toString() + ".");
+          }
+
+          Path tableDir = targetDirPath.getParent();
+          FileStatus[] backups = FSUtils.listStatus(outputFs, tableDir);
+          if (backups == null || backups.length == 0) {
+            outputFs.delete(tableDir, true);
+            LOG.debug(tableDir.toString() + " is empty, removed it.");
+          }
+        }
+      }
+
+    } catch (IOException e1) {
+      LOG.error("Cleaning up uncompleted backup data of " + backupContext.getBackupId() + " at "
+          + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + ".");
+    }
+  }
+
+  /**
+   * Add manifest for the current backup. The manifest is stored 
+   * within the table backup directory.  
+   * @param backupContext The current backup context
+   * @throws IOException exception
+   * @throws BackupException exception
+   */
+  private void addManifest(BackupContext backupContext) throws IOException, BackupException {
+    // set the overall backup phase : store manifest
+    backupContext.setPhase(BACKUPPHASE.STORE_MANIFEST);
+
+    // take no action if the backup has been cancelled
+    if (backupContext.isCancelled()) {
+      return;
+    }
+
+    BackupManifest manifest;
+    boolean fromExistingSnapshot = false; // to be implemented in future jira
+
+    // Since we have each table's backup in its own directory structure,
+    // we'll store its manifest with the table directory.
+    for (String table : backupContext.getTables()) {
+      manifest = new BackupManifest(backupContext, table);
+      if (fromExistingSnapshot) {
+        // mark backing up from existing snapshot in manifest, so that later, dependency analysis
+        // can skip this backup image
+        LOG.debug("backup using existing snapshot will be supported in future jira");
+      } else {
+        ArrayList<BackupImage> ancestors = this.backupManager.getAncestors(backupContext, table);
+        for (BackupImage image : ancestors) {
+          manifest.addDependentImage(image);
+        }
+      }
+      if (backupContext.getType().equals(BackupRestoreConstants.BACKUP_TYPE_INCR)) {
+        // We'll store the log timestamps for this table only in its manifest.
+        HashMap<String, HashMap<String, String>> tableTimestampMap =
+            new HashMap<String, HashMap<String, String>>();
+        tableTimestampMap.put(table, backupContext.getIncrTimestampMap().get(table));
+        manifest.setIncrTimestampMap(tableTimestampMap);
+      }
+      manifest.store(conf);
+    }
+
+    // For incremental backup, we store an overall manifest in
+    // <backup-root-dir>/WALs/<backup-id>
+    // It is used when creating the next incremental backup
+    if (backupContext.getType().equals(BackupRestoreConstants.BACKUP_TYPE_INCR)) {
+      manifest = new BackupManifest(backupContext);
+      // set the table region server start and end timestamps for incremental backup
+      manifest.setIncrTimestampMap(backupContext.getIncrTimestampMap());
+      ArrayList<BackupImage> ancestors = this.backupManager.getAncestors(backupContext);
+      for (BackupImage image : ancestors) {
+        manifest.addDependentImage(image);
+      }
+      manifest.store(conf);
+    }
+  }
+
+  /**
+   * Do incremental copy.
+   * @param backupContext backup context
+   */
+  private void incrementalCopy(BackupContext backupContext) throws Exception {
+
+    LOG.info("Incremental copy is starting.");
+
+    // set overall backup phase: incremental_copy
+    backupContext.setPhase(BACKUPPHASE.INCREMENTAL_COPY);
+
+    // take no action if the backup has been cancelled
+    if (backupContext.isCancelled()) {
+      return;
+    }
+
+    // get the incremental backup file list and prepare the DistCp parameters
+    List<String> incrBackupFileList = backupContext.getIncrBackupFileList();
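+    // allocate one extra slot in the array for the target directory, filled in below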
+    String[] strArr = incrBackupFileList.toArray(new String[incrBackupFileList.size() + 1]);
+    strArr[strArr.length - 1] = backupContext.getHLogTargetDir();
+
+    BackupCopyService copyService = BackupRestoreServiceFactory.getBackupCopyService(conf);
+    int res = copyService.copy(this, conf, BackupCopyService.Type.INCREMENTAL, strArr);
+
+    if (res != 0) {
+      LOG.error("Copy incremental log files failed with return code: " + res + ".");
+      throw new IOException("Failed of Hadoop Distributed Copy from " + incrBackupFileList + " to "
+          + backupContext.getHLogTargetDir());
+    }
+    LOG.info("Incremental copy from " + incrBackupFileList + " to "
+        + backupContext.getHLogTargetDir() + " finished.");
+
+  }
+
+  private String getMessage(Exception e) {
+    String msg = e.getMessage();
+    if (msg == null || msg.equals("")) {
+      msg = e.getClass().getName();
+    }
+    return msg;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupManager.java
new file mode 100644
index 0000000..fdb3c46
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupManager.java
@@ -0,0 +1,488 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.backup.BackupHandler.BACKUPSTATUS;
+import org.apache.hadoop.hbase.backup.BackupManifest.BackupImage;
+import org.apache.hadoop.hbase.backup.BackupUtil.BackupCompleteData;
+import org.apache.hadoop.hbase.backup.master.BackupLogCleaner;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+
+
+/**
+ * Handles backup requests on the server side and creates backup context records in hbase:backup
+ * to keep track of backups. The timestamps kept in the hbase:backup table are used for future
+ * incremental backups. Creates the BackupContext and dispatches the request.
+ *
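+ * <p>
+ * A minimal usage sketch (assumes backup is enabled via {@code HConstants.BACKUP_ENABLE_KEY}):
+ * <pre>
+ *   BackupManager manager = new BackupManager(conf);
+ *   manager.initialize();
+ *   // tableList: table names joined with BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ *   BackupContext ctx = manager.createBackupContext(backupId,
+ *       BackupRestoreConstants.BACKUP_TYPE_FULL, tableList, targetRootDir, null);
+ *   manager.dispatchRequest(ctx);
+ * </pre>
+ *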
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupManager {
+  private static final Log LOG = LogFactory.getLog(BackupManager.class);
+
+  private Configuration conf = null;
+  private BackupContext backupContext = null;
+  private ExecutorService pool = null;
+
+  private boolean backupComplete = false;
+
+  private BackupSystemTable systemTable;
+
+  /**
+   * Backup manager constructor.
+   * @param conf configuration
+   * @throws IOException exception
+   */
+  public BackupManager(Configuration conf) throws IOException {
+    if (!conf.getBoolean(HConstants.BACKUP_ENABLE_KEY, HConstants.BACKUP_ENABLE_DEFAULT)) {
+      throw new BackupException("HBase backup is not enabled. Check your " + 
+          HConstants.BACKUP_ENABLE_KEY + " setting.");
+    }
+    this.conf = conf;
+    this.systemTable = BackupSystemTable.getTable(conf);
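+    // if the JVM exits while a backup is in flight, the shutdown hook marks the backup
+    // cancelled and shuts down the handler pool (see ExitHandler below)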
+    Runtime.getRuntime().addShutdownHook(new ExitHandler());
+  }
+
+  /**
+   * This method modifies the master's configuration in order to inject backup-related features
+   * @param conf configuration
+   */
+  public static void decorateMasterConfiguration(Configuration conf) {
+    if (!isBackupEnabled(conf)) {
+      return;
+    }
+    String plugins = conf.get(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS);
+    String cleanerClass = BackupLogCleaner.class.getCanonicalName();
+    if (!plugins.contains(cleanerClass)) {
+      conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, plugins + "," + cleanerClass);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Added log cleaner: " + cleanerClass);
+      }
+    }
+  }
+
+  private static boolean isBackupEnabled(Configuration conf) {
+    return conf.getBoolean(HConstants.BACKUP_ENABLE_KEY, HConstants.BACKUP_ENABLE_DEFAULT);
+  }
+
+  private class ExitHandler extends Thread {
+    public ExitHandler() {
+      super("Backup Manager Exit Handler");
+    }
+
+    @Override
+    public void run() {
+
+      if (backupContext != null && !backupComplete) {
+
+        // the program is exiting while the backup is incomplete; mark it cancelled so the
+        // submitted backup handler takes no further action
+        backupContext.markCancel();
+
+        LOG.debug("Backup is cancelled due to force program exiting.");
+        try {
+          cancelBackup(backupContext.getBackupId());
+        } catch (Exception e) {
+          String msg = e.getMessage();
+          if (msg == null || msg.equals("")) {
+            msg = e.getClass().getName();
+          }
+          LOG.error("Failed to cancel backup " + backupContext.getBackupId() + " due to " + msg);
+        }
+      }
+
+      exit();
+    }
+  }
+
+  /**
+   * Cancel the ongoing backup via backup id.
+   * @param backupId The id of the ongoing backup to be cancelled
+   * @throws Exception exception
+   */
+  private void cancelBackup(String backupId) throws Exception {
+    // TODO: will be implemented in Phase 2: HBASE-14125
+    LOG.debug("Try to cancel the backup " + backupId + ". the feature is NOT implemented yet");
+
+  }
+
+  /**
+   * Stop all the work of backup.
+   */
+  public void exit() {
+
+    // currently we shut down all ongoing backup handlers immediately; later we may need to
+    // record the failed list somewhere
+    if (this.pool != null) {
+      this.pool.shutdownNow();
+    }
+
+  }
+
+  /**
+   * Create a BackupContext based on input backup request.
+   * @param backupId backup id
+   * @param type    type
+   * @param tablelist table list
+   * @param targetRootDir root dir
+   * @param snapshot snapshot name
+   * @return BackupContext context
+   * @throws BackupException exception
+   */
+  protected BackupContext createBackupContext(String backupId, String type, String tablelist,
+      String targetRootDir, String snapshot) throws BackupException {
+
+    if (targetRootDir == null) {
+      throw new BackupException("Wrong backup request parameter: target backup root directory");
+    }
+
+    if (type.equals(BackupRestoreConstants.BACKUP_TYPE_FULL) && tablelist == null) {
+      // A null table list for a full backup means back up all tables: fill the table list with
+      // all user tables from meta. If no table is available, throw an exception.
+
+      HTableDescriptor[] htds = null;
+      try (Connection conn = ConnectionFactory.createConnection(conf);
+          HBaseAdmin hbadmin = (HBaseAdmin) conn.getAdmin()) {
+
+        htds = hbadmin.listTables();
+      } catch (Exception e) {
+        throw new BackupException(e);
+      }
+
+      if (htds == null) {
+        throw new BackupException("No table exists for full backup of all tables.");
+      } else {
+        StringBuilder sb = new StringBuilder();
+        for (HTableDescriptor hTableDescriptor : htds) {
+          sb.append(hTableDescriptor.getNameAsString()
+              + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND);
+        }
+        sb.deleteCharAt(sb.lastIndexOf(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND));
+        tablelist = sb.toString();
+
+        LOG.info("Full backup all the tables available in the cluster: " + tablelist);
+      }
+    }
+
+    // there are one or more tables in the table list
+    return new BackupContext(backupId, type,
+        tablelist.split(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND), targetRootDir,
+        snapshot);
+
+  }
+
+  /**
+   * Check for an ongoing backup. Currently we only rely on the status recorded in hbase:backup.
+   * We need to consider handling orphan records in the future; otherwise, all subsequent
+   * requests will fail.
+   * @return the ongoing backup id if an ongoing backup exists, otherwise null
+   * @throws IOException exception
+   */
+  private String getOngoingBackupId() throws IOException {
+
+    ArrayList<BackupContext> sessions = systemTable.getBackupContexts(BACKUPSTATUS.ONGOING);
+    if (sessions.size() == 0) {
+      return null;
+    }
+    return sessions.get(0).getBackupId();
+  }
+
+  /**
+   * Start the backup manager service.
+   * @throws IOException exception
+   */
+  public void initialize() throws IOException {
+    String ongoingBackupId = this.getOngoingBackupId();
+    if (ongoingBackupId != null) {
+      LOG.info("There is a ongoing backup " + ongoingBackupId
+          + ". Can not launch new backup until no ongoing backup remains.");
+      throw new BackupException("There is ongoing backup.");
+    }
+
+    // Initialize thread pools
+    int nrThreads = this.conf.getInt("hbase.backup.threads.max", 1);
+    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
+    builder.setNameFormat("BackupHandler-%1$d");
+    this.pool =
+        new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
+            new LinkedBlockingQueue<Runnable>(), builder.build());
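+    // allow core threads to time out so an idle pool holds no live threads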
+    ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);
+  }
+
+  /**
+   * Dispatch and handle a backup request.
+   * @param backupContext backup context
+   * @throws BackupException exception
+   */
+  public void dispatchRequest(BackupContext backupContext) throws BackupException {
+
+    this.backupContext = backupContext;
+
+    LOG.info("Got a backup request: " + "Type: " + backupContext.getType() + "; Tables: "
+        + backupContext.getTableListAsString() + "; TargetRootDir: "
+        + backupContext.getTargetRootDir());
+
+    // dispatch the request to a backup handler and put it in the handler map
+
+    BackupHandler handler = new BackupHandler(this.backupContext, this, conf);
+    Future<Object> future = this.pool.submit(handler);
+    // wait for the execution to complete
+    try {
+      future.get();
+    } catch (InterruptedException | CancellationException | ExecutionException e) {
+      throw new BackupException(e);
+    }
+
+    // mark the backup complete for exit handler's processing
+    backupComplete = true;
+
+    LOG.info("Backup request " + backupContext.getBackupId() + " has been executed.");
+  }
+
+  /**
+   * Get direct ancestors of the current backup.
+   * @param backupCtx The backup context for the current backup
+   * @return The ancestors for the current backup
+   * @throws IOException exception
+   * @throws BackupException exception
+   */
+  protected ArrayList<BackupImage> getAncestors(BackupContext backupCtx) throws IOException,
+      BackupException {
+    LOG.debug("Getting the direct ancestors of the current backup ...");
+
+    ArrayList<BackupImage> ancestors = new ArrayList<BackupImage>();
+
+    // a full backup has no ancestors
+    if (backupCtx.getType().equals(BackupRestoreConstants.BACKUP_TYPE_FULL)) {
+      LOG.debug("Current backup is a full backup, no direct ancestor for it.");
+      return ancestors;
+    }
+
+    // get all backup history list in descending order
+
+    ArrayList<BackupCompleteData> allHistoryList = getBackupHistory();
+    for (BackupCompleteData backup : allHistoryList) {
+      BackupImage image =
+          new BackupImage(backup.getBackupToken(), backup.getType(), backup.getBackupRootPath(),
+              backup.getTableList(), Long.parseLong(backup.getStartTime()), Long.parseLong(backup
+                  .getEndTime()));
+
+      // add full backup images as ancestors until the most recent incremental backup is reached
+      if (backup.getType().equals(BackupRestoreConstants.BACKUP_TYPE_FULL)) {
+
+        // a backup image taken from an existing snapshot is not involved in the dependency chain
+        if (backup.fromExistingSnapshot()) {
+          continue;
+        }
+        // check the backup image coverage: if this image is already covered by the newer ones,
+        // there is no need to add it
+        if (!BackupManifest.canCoverImage(ancestors, image)) {
+          ancestors.add(image);
+        }
+      } else {
+        // found the last incremental backup. If the previously added full backup ancestor images
+        // can cover it, this incremental ancestor is not a dependency of the current incremental
+        // backup; that is, we have reached the backup scope boundary of the current table set.
+        // Otherwise, this incremental backup ancestor is a dependency of the ongoing
+        // incremental backup.
+        if (BackupManifest.canCoverImage(ancestors, image)) {
+          LOG.debug("Met the backup boundary of the current table set. "
+              + "The root full backup images for the current backup scope:");
+          for (BackupImage image1 : ancestors) {
+            LOG.debug("  BackupId: " + image1.getBackupId() + ", Backup directory: "
+                + image1.getRootDir());
+          }
+        } else {
+          Path logBackupPath =
+              HBackupFileSystem.getLogBackupPath(backup.getBackupRootPath(),
+                backup.getBackupToken());
+          LOG.debug("Current backup has an incremental backup ancestor, "
+              + "touching its image manifest in " + logBackupPath.toString()
+              + " to construct the dependency.");
+
+          BackupManifest lastIncrImgManifest = new BackupManifest(conf, logBackupPath);
+          BackupImage lastIncrImage = lastIncrImgManifest.getBackupImage();
+          ancestors.add(lastIncrImage);
+
+          LOG.debug("Last dependent incremental backup image information:");
+          LOG.debug("  Token: " + lastIncrImage.getBackupId());
+          LOG.debug("  Backup directory: " + lastIncrImage.getRootDir());
+        }
+      }
+    }
+    LOG.debug("Got " + ancestors.size() + " ancestors for the current backup.");
+    return ancestors;
+  }
+
+  /**
+   * Get the direct ancestors of this backup for one table involved.
+   * @param backupContext backup context
+   * @param table table
+   * @return backupImages on the dependency list
+   * @throws BackupException exception
+   * @throws IOException exception
+   */
+  protected ArrayList<BackupImage> getAncestors(BackupContext backupContext, String table)
+      throws BackupException, IOException {
+    ArrayList<BackupImage> ancestors = getAncestors(backupContext);
+    ArrayList<BackupImage> tableAncestors = new ArrayList<BackupImage>();
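+    // walk the global ancestor list (newest first); the first full backup image that
+    // contains the table closes its dependency chain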
+    for (BackupImage image : ancestors) {
+      if (image.hasTable(table)) {
+        tableAncestors.add(image);
+        if (image.getType().equals(BackupRestoreConstants.BACKUP_TYPE_FULL)) {
+          break;
+        }
+      }
+    }
+    return tableAncestors;
+  }
+
+  /**
+   * hbase:backup operations
+   */
+
+  /**
+   * Updates status (state) of a backup session in a persistent store
+   * @param context context
+   * @throws IOException exception
+   */
+  public void updateBackupStatus(BackupContext context) throws IOException {
+    systemTable.updateBackupStatus(context);
+  }
+
+  /**
+   * Read the last backup start code (timestamp) of the last successful backup. Returns null
+   * if there is no start code stored in hbase:backup or the stored value has length 0. Both
+   * cases indicate that no backup has completed successfully so far.
+   * @return the timestamp of a last successful backup
+   * @throws IOException exception
+   */
+  public String readBackupStartCode() throws IOException {
+    return systemTable.readBackupStartCode();
+  }
+
+  /**
+   * Write the start code (timestamp) to hbase:backup. If null is passed in, a zero-length
+   * value is written.
+   * @param startCode start code
+   * @throws IOException exception
+   */
+  public void writeBackupStartCode(String startCode) throws IOException {
+    systemTable.writeBackupStartCode(startCode);
+  }
+
+  /**
+   * Get the RS log information after the last log roll from hbase:backup.
+   * @return RS log info
+   * @throws IOException exception
+   */
+  public HashMap<String, String> readRegionServerLastLogRollResult() throws IOException {
+    return systemTable.readRegionServerLastLogRollResult();
+  }
+
+  /**
+   * Get all completed backup information (in desc order by time)
+   * @return history info of BackupCompleteData
+   * @throws IOException exception
+   */
+  public ArrayList<BackupCompleteData> getBackupHistory() throws IOException {
+    return systemTable.getBackupHistory();
+  }
+
+  /**
+   * Write the current timestamps for each regionserver to hbase:backup after a successful full or
+   * incremental backup. Each table may have a different set of log timestamps. The saved timestamp
+   * is of the last log file that was backed up already.
+   * @param tables tables
+   * @param newTimestamps map from region server to the timestamp of its last rolled log
+   * @throws IOException exception
+   */
+  public void writeRegionServerLogTimestamp(Set<String> tables,
+      HashMap<String, String> newTimestamps) throws IOException {
+    systemTable.writeRegionServerLogTimestamp(tables, newTimestamps);
+  }
+
+  /**
+   * Read the timestamp for each region server log after the last successful backup. Each table has
+   * its own set of timestamps. The info is stored for each table as a concatenated string on ZK
+   * under //hbase//backup//incr//tablelogtimestamp//table_name
+   * @return the timestamp for each region server. key: tableName value:
+   *         RegionServer,PreviousTimeStamp
+   * @throws IOException exception
+   */
+  public HashMap<String, HashMap<String, String>> readLogTimestampMap() throws IOException {
+    return systemTable.readLogTimestampMap();
+  }
+
+  /**
+   * Return the current tables covered by incremental backup.
+   * @return set of tableNames
+   * @throws IOException exception
+   */
+  public Set<String> getIncrementalBackupTableSet() throws IOException {
+    return systemTable.getIncrementalBackupTableSet();
+  }
+
+  /**
+   * Adds set of tables to overall incremental backup table set
+   * @param tables tables
+   * @throws IOException exception
+   */
+  public void addIncrementalBackupTableSet(Set<String> tables) throws IOException {
+    systemTable.addIncrementalBackupTableSet(tables);
+  }
+  
+  /**
+   * Saves the list of WAL files after an incremental backup operation. These files are stored
+   * until TTL expiration and are used by the Backup Log Cleaner plugin to determine which WAL
+   * files can be safely purged.
+   * @param files list of WAL file paths
+   * @throws IOException exception
+   */
+  public void recordWALFiles(List<String> files) throws IOException {
+    systemTable.addWALFiles(files, backupContext.getBackupId());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupManifest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupManifest.java
new file mode 100644
index 0000000..f41540b
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupManifest.java
@@ -0,0 +1,814 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Properties;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.JsonParseException;
+import org.codehaus.jackson.map.JsonMappingException;
+import org.codehaus.jackson.map.ObjectMapper;
+
+/**
+ * Backup manifest contains all the metadata of a backup image. The manifest info is bundled
+ * as a manifest file together with the data, so that each backup image contains all the
+ * info needed for restore.
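+ * <p>
+ * A minimal sketch of how a per-table manifest is built and persisted (this mirrors
+ * BackupHandler#addManifest):
+ * <pre>
+ *   BackupManifest manifest = new BackupManifest(backupContext, table);
+ *   for (BackupImage image : ancestors) {
+ *     manifest.addDependentImage(image);
+ *   }
+ *   manifest.store(conf);
+ * </pre>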
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupManifest {
+
+  private static final Log LOG = LogFactory.getLog(BackupManifest.class);
+
+  // manifest file name
+  public static final String FILE_NAME = ".backup.manifest";
+
+  // manifest file version, current is 1.0
+  public static final String MANIFEST_VERSION = "1.0";
+
+  // tags of fields for manifest file
+  public static final String TAG_VERSION = "Manifest-Version";
+  public static final String TAG_BACKUPID = "Backup-Id";
+  public static final String TAG_BACKUPTYPE = "Backup-Type";
+  public static final String TAG_TABLESET = "Table-Set";
+  public static final String TAG_STARTTS = "Start-Timestamp";
+  public static final String TAG_COMPLETETS = "Complete-Timestamp";
+  public static final String TAG_TABLEBYTES = "Total-Table-Bytes";
+  public static final String TAG_LOGBYTES = "Total-Log-Bytes";
+  public static final String TAG_INCRTIMERANGE = "Incremental-Time-Range";
+  public static final String TAG_DEPENDENCY = "Dependency";
+  public static final String TAG_IMAGESTATE = "Image-State";
+  public static final String TAG_COMPACTION = "Compaction";
+
+  public static final String ERROR_DEPENDENCY = "DEPENDENCY_ERROR";
+
+  public static final int DELETE_SUCCESS = 0;
+  public static final int DELETE_FAILED = -1;
+
+  // currently only one state; CONVERTED and MERGED will be added in a future JIRA
+  public static final String IMAGE_STATE_ORIG = "ORIGINAL";
+  public static final String IMAGE_STATE_CONVERT = "CONVERTED";
+  public static final String IMAGE_STATE_MERGE = "MERGED";
+  public static final String IMAGE_STATE_CONVERT_MERGE = "CONVERTED,MERGED";
+
+  // backup image; the dependency graph is made up of a series of backup images
+
+  public static class BackupImage implements Comparable<BackupImage> {
+
+    private String backupId;
+    private String type;
+    private String rootDir;
+    private String tableSet;
+    private long startTs;
+    private long completeTs;
+    private ArrayList<BackupImage> ancestors;
+
+    public BackupImage() {
+      super();
+    }
+
+    public BackupImage(String backupId, String type, String rootDir, String tableSet, long startTs,
+        long completeTs) {
+      this.backupId = backupId;
+      this.type = type;
+      this.rootDir = rootDir;
+      this.tableSet = tableSet;
+      this.startTs = startTs;
+      this.completeTs = completeTs;
+    }
+
+    public String getBackupId() {
+      return backupId;
+    }
+
+    public void setBackupId(String backupId) {
+      this.backupId = backupId;
+    }
+
+    public String getType() {
+      return type;
+    }
+
+    public void setType(String type) {
+      this.type = type;
+    }
+
+    public String getRootDir() {
+      return rootDir;
+    }
+
+    public void setRootDir(String rootDir) {
+      this.rootDir = rootDir;
+    }
+
+    public String getTableSet() {
+      return tableSet;
+    }
+
+    public void setTableSet(String tableSet) {
+      this.tableSet = tableSet;
+    }
+
+    public long getStartTs() {
+      return startTs;
+    }
+
+    public void setStartTs(long startTs) {
+      this.startTs = startTs;
+    }
+
+    public long getCompleteTs() {
+      return completeTs;
+    }
+
+    public void setCompleteTs(long completeTs) {
+      this.completeTs = completeTs;
+    }
+
+    public ArrayList<BackupImage> getAncestors() {
+      if (this.ancestors == null) {
+        this.ancestors = new ArrayList<BackupImage>();
+      }
+      return this.ancestors;
+    }
+
+    public void addAncestor(BackupImage backupImage) {
+      this.getAncestors().add(backupImage);
+    }
+
+    public boolean hasAncestor(String token) {
+      for (BackupImage image : this.getAncestors()) {
+        if (image.getBackupId().equals(token)) {
+          return true;
+        }
+      }
+      return false;
+    }
+
+    public boolean hasTable(String table) {
+      String[] tables = this.getTableSet().split(";");
+      for (String t : tables) {
+        if (t.equals(table)) {
+          return true;
+        }
+      }
+      return false;
+    }
+
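+    // Backup ids end in a creation timestamp (e.g. "backup_1396650096738", illustrative);
+    // images are ordered by that timestamp.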
+    @Override
+    public int compareTo(BackupImage other) {
+      String thisBackupId = this.getBackupId();
+      String otherBackupId = other.getBackupId();
+      long thisTS = Long.parseLong(thisBackupId.substring(thisBackupId.lastIndexOf("_") + 1));
+      long otherTS = Long.parseLong(otherBackupId.substring(otherBackupId.lastIndexOf("_") + 1));
+      return Long.compare(thisTS, otherTS);
+    }
+  }
+
+  // manifest version
+  private String version = MANIFEST_VERSION;
+
+  // hadoop hbase configuration
+  protected Configuration config = null;
+
+  // backup root directory
+  private String rootDir = null;
+
+  // backup image directory
+  private String tableBackupDir = null;
+
+  // backup log directory if this is an incremental backup
+  private String logBackupDir = null;
+
+  // backup token
+  private String token;
+
+  // backup type, full or incremental
+  private String type;
+
+  // the table set for the backup
+  private ArrayList<String> tableSet;
+
+  // actual start timestamp of the backup process
+  private long startTs;
+
+  // actual complete timestamp of the backup process
+  private long completeTs;
+
+  // total bytes for table backup image
+  private long tableBytes;
+
+  // total bytes for the backed-up logs for incremental backup
+  private long logBytes;
+
+  // the region server timestamp for tables:
+  // <table, <rs, timestamp>>
+  private Map<String, HashMap<String, String>> incrTimeRanges;
+
+  // dependency of this backup, including all the dependent images for point-in-time (PIT) recovery
+  private Map<String, BackupImage> dependency;
+
+  // the state of backup image
+  private String imageState;
+
+  // whether the image has been compacted
+  private boolean isCompacted = false;
+
+  // the merge chain of the original backups, null if not a merged backup
+  private LinkedList<String> mergeChain;
+
+  /**
+   * Construct a manifest for an ongoing backup.
+   * @param backupCtx The ongoing backup context
+   */
+  public BackupManifest(BackupContext backupCtx) {
+    this.token = backupCtx.getBackupId();
+    this.type = backupCtx.getType();
+    this.rootDir = backupCtx.getTargetRootDir();
+    if (this.type.equals(BackupRestoreConstants.BACKUP_TYPE_INCR)) {
+      this.logBackupDir = backupCtx.getHLogTargetDir();
+      this.logBytes = backupCtx.getTotalBytesCopied();
+    }
+    this.startTs = backupCtx.getStartTs();
+    this.completeTs = backupCtx.getEndTs();
+    this.loadTableSet(backupCtx.getTableListAsString());
+    this.setImageOriginal();
+  }
+
+  /**
+   * Construct a table level manifest for a backup of the named table.
+   * @param backupCtx The ongoing backup context
+   * @param table The name of the table being backed up
+   */
+  public BackupManifest(BackupContext backupCtx, String table) {
+    this.token = backupCtx.getBackupId();
+    this.type = backupCtx.getType();
+    this.rootDir = backupCtx.getTargetRootDir();
+    this.tableBackupDir = backupCtx.getBackupStatus(table).getTargetDir();
+    if (this.type.equals(BackupRestoreConstants.BACKUP_TYPE_INCR)) {
+      this.logBackupDir = backupCtx.getHLogTargetDir();
+      this.logBytes = backupCtx.getTotalBytesCopied();
+    }
+    this.startTs = backupCtx.getStartTs();
+    this.completeTs = backupCtx.getEndTs();
+    this.loadTableSet(table);
+    this.setImageOriginal();
+  }
+
+  /**
+   * Construct manifest from a backup directory.
+   * @param conf configuration
+   * @param backupPath backup path
+   * @throws BackupException if the manifest file cannot be found or read
+   */
+  public BackupManifest(Configuration conf, Path backupPath) throws BackupException {
+
+    LOG.debug("Loading manifest from: " + backupPath.toString());
+    // The input backupPath may not exactly be the backup table dir.
+    // It could be the backup log dir where there is also a manifest file stored.
+    // This variable's purpose is to keep the correct and original location so
+    // that we can store/persist it.
+    this.tableBackupDir = backupPath.toString();
+    this.config = conf;
+    try {
+
+      FileSystem fs = backupPath.getFileSystem(conf);
+      FileStatus[] subFiles = FSUtils.listStatus(fs, backupPath);
+      if (subFiles == null) {
+        String errorMsg = backupPath.toString() + " does not exist";
+        LOG.error(errorMsg);
+        throw new IOException(errorMsg);
+      }
+      for (FileStatus subFile : subFiles) {
+        if (subFile.getPath().getName().equals(FILE_NAME)) {
+
+          // load and set manifest field from file content
+          FSDataInputStream in = fs.open(subFile.getPath());
+          Properties props = new Properties();
+          try {
+            props.load(in);
+          } catch (IOException e) {
+            LOG.error("Error when loading from manifest file!");
+            throw e;
+          } finally {
+            in.close();
+          }
+
+          this.version = props.getProperty(TAG_VERSION);
+          this.token = props.getProperty(TAG_BACKUPID);
+          this.type = props.getProperty(TAG_BACKUPTYPE);
+          // Here the parameter backupPath is where the manifest file is.
+          // There should always be a manifest file under:
+          // backupRootDir/namespace/table/backupId/.backup.manifest
+          Path p = backupPath.getParent();
+          if (p.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
+            this.rootDir = p.getParent().toString();
+          } else {
+            this.rootDir = p.getParent().getParent().toString();
+          }
+
+          this.loadTableSet(props.getProperty(TAG_TABLESET));
+
+          this.startTs = Long.parseLong(props.getProperty(TAG_STARTTS));
+          this.completeTs = Long.parseLong(props.getProperty(TAG_COMPLETETS));
+          this.tableBytes = Long.parseLong(props.getProperty(TAG_TABLEBYTES));
+          if (this.type.equals(BackupRestoreConstants.BACKUP_TYPE_INCR)) {
+            this.logBytes = (Long.parseLong(props.getProperty(TAG_LOGBYTES)));
+            LOG.debug("convert will be implemented by future jira");
+          }
+          this.loadIncrementalTimeRanges(props.getProperty(TAG_INCRTIMERANGE));
+          this.loadDependency(props.getProperty(TAG_DEPENDENCY));
+          this.imageState = props.getProperty(TAG_IMAGESTATE);
+          this.isCompacted = "TRUE".equalsIgnoreCase(props.getProperty(TAG_COMPACTION));
+          LOG.debug("merge and from existing snapshot will be implemented by future jira");
+          LOG.debug("Loaded manifest instance from manifest file: "
+              + FSUtils.getPath(subFile.getPath()));
+          return;
+        }
+      }
+      String errorMsg = "No manifest file found in: " + backupPath.toString();
+      LOG.error(errorMsg);
+      throw new IOException(errorMsg);
+
+    } catch (IOException e) {
+      throw new BackupException(e.getMessage());
+    }
+  }
+
+  public String getType() {
+    return type;
+  }
+
+  public void setType(String type) {
+    this.type = type;
+  }
+
+  /**
+   * Load table set from a table set list string (t1;t2;t3;...).
+   * @param tableSetStr Table set list string
+   */
+  private void loadTableSet(String tableSetStr) {
+
+    LOG.debug("Loading table set: " + tableSetStr);
+
+    String[] tables = tableSetStr.split(";");
+    this.tableSet = this.getTableSet();
+    if (this.tableSet.size() > 0) {
+      this.tableSet.clear();
+    }
+    for (int i = 0; i < tables.length; i++) {
+      this.tableSet.add(tables[i]);
+    }
+
+    LOG.debug(tables.length + " tables exist in table set.");
+  }
+
+  public void setImageOriginal() {
+    this.imageState = IMAGE_STATE_ORIG;
+  }
+
+  /**
+   * Get the table set of this image.
+   * @return The table set list
+   */
+  public ArrayList<String> getTableSet() {
+    if (this.tableSet == null) {
+      this.tableSet = new ArrayList<String>();
+    }
+    return this.tableSet;
+  }
+
+  /**
+   * Persist the manifest file.
+   * @param conf configuration used to resolve the target file system
+   * @throws BackupException if storing the manifest file fails
+   */
+  public void store(Configuration conf) throws BackupException {
+    Properties props = new Properties();
+    props.setProperty(TAG_VERSION, this.version);
+    props.setProperty(TAG_BACKUPID, this.token);
+    props.setProperty(TAG_BACKUPTYPE, this.type);
+    props.setProperty(TAG_TABLESET, this.getTableSetStr());
+    LOG.debug("convert will be supported in future jira");
+    // String convertedTables = this.getConvertedTableSetStr();
+    // if (convertedTables != null )
+    // props.setProperty(TAG_CONVERTEDTABLESET, convertedTables);
+    props.setProperty(TAG_STARTTS, Long.toString(this.startTs));
+    props.setProperty(TAG_COMPLETETS, Long.toString(this.completeTs));
+    props.setProperty(TAG_TABLEBYTES, Long.toString(this.tableBytes));
+    if (this.type.equals(BackupRestoreConstants.BACKUP_TYPE_INCR)) {
+      props.setProperty(TAG_LOGBYTES, Long.toString(this.logBytes));
+    }
+    props.setProperty(TAG_INCRTIMERANGE, this.getIncrTimestampStr());
+    props.setProperty(TAG_DEPENDENCY, this.getDependencyStr());
+    props.setProperty(TAG_IMAGESTATE, this.getImageState());
+    props.setProperty(TAG_COMPACTION, this.isCompacted ? "TRUE" : "FALSE");
+    LOG.debug("merge will be supported in future jira");
+    // props.setProperty(TAG_MERGECHAIN, this.getMergeChainStr());
+    LOG.debug("backup from existing snapshot will be supported in future jira");
+    // props.setProperty(TAG_FROMSNAPSHOT, this.isFromSnapshot() ? "TRUE" : "FALSE");
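+
+    // The manifest is stored as a java.util.Properties file; an illustrative (abridged)
+    // example of its contents:
+    //   Backup-Id=backup_1396650096738
+    //   Backup-Type=full
+    //   Table-Set=t1;t2
+    //   Start-Timestamp=1396650096738
+    //   Complete-Timestamp=1396650100000
+    // (keys come from the TAG_* constants above; the values are made up)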
+
+    // write the file, overwriting it if it already exists
+    Path manifestFilePath =
+        new Path((this.tableBackupDir != null ? this.tableBackupDir : this.logBackupDir)
+            + File.separator + FILE_NAME);
+    try (FSDataOutputStream out =
+        manifestFilePath.getFileSystem(conf).create(manifestFilePath, true)) {
+      props.store(out, "HBase backup manifest.");
+    } catch (IOException e) {
+      throw new BackupException(e.getMessage());
+    }
+
+    LOG.debug("Manifest file stored to " + this.tableBackupDir != null ? this.tableBackupDir
+        : this.logBackupDir + File.separator + FILE_NAME);
+  }
+
+  /**
+   * Get the table set string in the format of t1;t2;t3...
+   */
+  private String getTableSetStr() {
+    return BackupUtil.concat(getTableSet(), ";");
+  }
+
+  public String getImageState() {
+    return imageState;
+  }
+
+  public String getVersion() {
+    return version;
+  }
+
+  /**
+   * Get this backup image.
+   * @return the backup image.
+   */
+  public BackupImage getBackupImage() {
+    return this.getDependency().get(this.token);
+  }
+
+  /**
+   * Add dependent backup image for this backup.
+   * @param image The direct dependent backup image
+   */
+  public void addDependentImage(BackupImage image) {
+    this.getDependency().get(this.token).addAncestor(image);
+    this.setDependencyMap(this.getDependency(), image);
+  }
+
+  /**
+   * Get the dependency string in JSON format.
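+   * An illustrative (abridged) value: {"backupId":"backup_1396650096738","type":"full",
+   * "tableSet":"t1;t2", ...}; field names follow the BackupImage bean properties and the
+   * values here are made up.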
+   */
+  private String getDependencyStr() {
+    BackupImage thisImage = this.getDependency().get(this.token);
+    if (thisImage == null) {
+      LOG.warn("There is no dependency set yet.");
+      return null;
+    }
+
+    ObjectMapper mapper = new ObjectMapper();
+    try {
+      return mapper.writeValueAsString(thisImage);
+    } catch (IOException e) {
+      // JsonGenerationException and JsonMappingException are IOException subclasses
+      LOG.error("Error when generating dependency string from backup image.", e);
+      return ERROR_DEPENDENCY;
+    }
+  }
+
+  /**
+   * Get all dependent backup images. The image of this backup is also contained.
+   * @return The dependent backup images map
+   */
+  public Map<String, BackupImage> getDependency() {
+    if (this.dependency == null) {
+      this.dependency = new HashMap<String, BackupImage>();
+      LOG.debug(this.rootDir + " " + this.token + " " + this.type);
+      this.dependency.put(this.token,
+        new BackupImage(this.token, this.type, this.rootDir, this.getTableSetStr(), this.startTs,
+            this.completeTs));
+    }
+    return this.dependency;
+  }
+
+  /**
+   * Set the incremental timestamp map directly.
+   * @param incrTimestampMap timestamp map
+   */
+  public void setIncrTimestampMap(HashMap<String, HashMap<String, String>> incrTimestampMap) {
+    this.incrTimeRanges = incrTimestampMap;
+  }
+
+  /**
+   * Get the incremental time range string in the format of:
+   * t1,rs1:ts,rs2:ts,...;t2,rs1:ts,rs2:ts,...;t3,rs1:ts,rs2:ts,...
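+   * For example (values illustrative):
+   * t1,rs1:1396650096738,rs2:1396650096739;t2,rs1:1396650096738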
+   */
+  private String getIncrTimestampStr() {
+    StringBuilder sb = new StringBuilder();
+    for (Entry<String, HashMap<String, String>> tableEntry : this.getIncrTimestamps().entrySet()) {
+      sb.append(tableEntry.getKey() + ","); // table
+      for (Entry<String, String> rsEntry : tableEntry.getValue().entrySet()) {
+        sb.append(rsEntry.getKey() + ":"); // region server
+        sb.append(rsEntry.getValue() + ","); // timestamp
+      }
+      if (sb.length() > 1 && sb.charAt(sb.length() - 1) == ',') {
+        sb.deleteCharAt(sb.length() - 1);
+      }
+      sb.append(";");
+    }
+    if (sb.length() > 1 && sb.charAt(sb.length() - 1) == ';') {
+      sb.deleteCharAt(sb.length() - 1);
+    }
+    return sb.toString();
+  }
+
+  public Map<String, HashMap<String, String>> getIncrTimestamps() {
+    if (this.incrTimeRanges == null) {
+      this.incrTimeRanges = new HashMap<String, HashMap<String, String>>();
+    }
+    return this.incrTimeRanges;
+  }
+
+  /**
+   * Load incremental timestamps from a given string and store them in the collection. The
+   * timestamp string is in the format:
+   * t1,rs1:ts,rs2:ts,...;t2,rs1:ts,rs2:ts,...;t3,rs1:ts,rs2:ts,...
+   * @param timeRangesStr Incremental time ranges as a string
+   * @throws IOException if the string is malformed
+   */
+  private void loadIncrementalTimeRanges(String timeRangesStr) throws IOException {
+
+    LOG.debug("Loading table's incremental time ranges of region servers from string in manifest: "
+        + timeRangesStr);
+
+    Map<String, HashMap<String, String>> timeRangeMap = this.getIncrTimestamps();
+
+    String[] entriesOfTables = timeRangesStr.split(";");
+    for (int i = 0; i < entriesOfTables.length; i++) {
+      String[] itemsForTable = entriesOfTables[i].split(",");
+
+      // validate the incremental timestamps string format for a table:
+      // t1,rs1:ts,rs2:ts,...
+      if (itemsForTable.length < 1) {
+        String errorMsg = "Wrong incremental time range format: " + timeRangesStr;
+        LOG.error(errorMsg);
+        throw new IOException(errorMsg);
+      }
+
+      HashMap<String, String> rsTimestampMap = new HashMap<String, String>();
+      for (int j = 1; j < itemsForTable.length; j++) {
+        String[] rsTsEntry = itemsForTable[j].split(":");
+
+        // validate the incremental timestamps string format for a region server:
+        // rs1:ts
+        if (rsTsEntry.length != 2) {
+          String errorMsg = "Wrong incremental timestamp format: " + itemsForTable[j];
+          LOG.error(errorMsg);
+          throw new IOException(errorMsg);
+        }
+
+        // an entry for timestamp of a region server
+        rsTimestampMap.put(rsTsEntry[0], rsTsEntry[1]);
+      }
+
+      timeRangeMap.put(itemsForTable[0], rsTimestampMap);
+    }
+
+    // all entries have been loaded
+    LOG.debug(entriesOfTables.length + " tables' incremental time ranges have been loaded.");
+  }
+
+  /**
+   * Get the image list of this backup for restore in time order.
+   * @param reverse If true, then output in reverse order, otherwise in time order from old to new
+   * @return the backup image list for restore in time order
+   */
+  public ArrayList<BackupImage> getRestoreDependentList(boolean reverse) {
+    TreeMap<Long, BackupImage> restoreImages = new TreeMap<Long, BackupImage>();
+    for (BackupImage image : this.getDependency().values()) {
+      restoreImages.put(Long.valueOf(image.startTs), image);
+    }
+    return new ArrayList<BackupImage>(reverse ? (restoreImages.descendingMap().values())
+        : (restoreImages.values()));
+  }
+
+  /**
+   * Get the dependent image list for a specific table of this backup, in time order from old to
+   * new, for restoring to this backup image level.
+   * @param table table
+   * @return the backup image list for a table in time order
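+   * For example (illustrative), given a full image F followed by incremental images I1 and I2
+   * that all contain the table, the result is [F, I1, I2].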
+   */
+  public ArrayList<BackupImage> getDependentListByTable(String table) {
+    ArrayList<BackupImage> tableImageList = new ArrayList<BackupImage>();
+    ArrayList<BackupImage> imageList = getRestoreDependentList(true);
+    for (BackupImage image : imageList) {
+      if (image.hasTable(table)) {
+        tableImageList.add(image);
+        if (image.getType().equals(BackupRestoreConstants.BACKUP_TYPE_FULL)) {
+          break;
+        }
+      }
+    }
+    Collections.reverse(tableImageList);
+    return tableImageList;
+  }
+
+  /**
+   * Get the full dependent image list in the whole dependency scope for a specific table of this
+   * backup in time order from old to new.
+   * @param table table
+   * @return the full backup image list for a table in time order in the whole scope of the
+   *         dependency of this image
+   */
+  public ArrayList<BackupImage> getAllDependentListByTable(String table) {
+    ArrayList<BackupImage> tableImageList = new ArrayList<BackupImage>();
+    ArrayList<BackupImage> imageList = getRestoreDependentList(false);
+    for (BackupImage image : imageList) {
+      if (image.hasTable(table)) {
+        tableImageList.add(image);
+      }
+    }
+    return tableImageList;
+  }
+
+  /**
+   * Load dependency from a dependency json string.
+   * @param dependencyStr The dependency string
+   * @throws IOException if the dependency string cannot be parsed
+   */
+  private void loadDependency(String dependencyStr) throws IOException {
+
+    LOG.debug("Loading dependency: " + dependencyStr);
+
+    String msg = "Dependency is broken in the manifest.";
+    if (dependencyStr.equals(ERROR_DEPENDENCY)) {
+      throw new IOException(msg);
+    }
+
+    ObjectMapper mapper = new ObjectMapper();
+    BackupImage image = null;
+    try {
+      image = mapper.readValue(dependencyStr, BackupImage.class);
+    } catch (IOException e) {
+      // JsonParseException and JsonMappingException are IOException subclasses
+      LOG.error(msg);
+      throw new IOException(e.getMessage(), e);
+    }
+    LOG.debug("Manifest's current backup image information:");
+    LOG.debug("  Token: " + image.getBackupId());
+    LOG.debug("  Backup directory: " + image.getRootDir());
+    this.setDependencyMap(this.getDependency(), image);
+
+    LOG.debug("Dependent images map:");
+    for (Entry<String, BackupImage> entry : this.getDependency().entrySet()) {
+      LOG.debug("  " + entry.getKey() + " : " + entry.getValue().getBackupId() + " -- "
+          + entry.getValue().getRootDir());
+    }
+
+    LOG.debug("Dependency has been loaded.");
+  }
+
+  /**
+   * Recursively set the dependency map of the backup images.
+   * @param map The dependency map
+   * @param image The backup image
+   */
+  private void setDependencyMap(Map<String, BackupImage> map, BackupImage image) {
+    if (image == null) {
+      return;
+    } else {
+      map.put(image.getBackupId(), image);
+      for (BackupImage img : image.getAncestors()) {
+        setDependencyMap(map, img);
+      }
+    }
+  }
+
+  /**
+   * Check whether backup image1 could cover backup image2 or not.
+   * @param image1 backup image 1
+   * @param image2 backup image 2
+   * @return true if image1 can cover image2, otherwise false
+   */
+  public static boolean canCoverImage(BackupImage image1, BackupImage image2) {
+    // image1 can cover image2 only when the following conditions are satisfied:
+    // - image1 must not be an incremental image;
+    // - image1 must be taken after image2 has been taken;
+    // - table set of image1 must cover the table set of image2.
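+    // Illustrative example: a full image over {t1, t2} taken at ts 200 can cover an
+    // image over {t1} taken at ts 100, but not an image that includes t3.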
+    if (image1.getType().equals(BackupRestoreConstants.BACKUP_TYPE_INCR)) {
+      return false;
+    }
+    if (image1.getStartTs() < image2.getStartTs()) {
+      return false;
+    }
+    String[] image1TableSet = image1.getTableSet().split(";");
+    String[] image2TableSet = image2.getTableSet().split(";");
+    boolean found = false;
+    for (int i = 0; i < image2TableSet.length; i++) {
+      found = false;
+      for (int j = 0; j < image1TableSet.length; j++) {
+        if (image2TableSet[i].equals(image1TableSet[j])) {
+          found = true;
+          break;
+        }
+      }
+      if (!found) {
+        return false;
+      }
+    }
+
+    LOG.debug("Backup image " + image1.getBackupId() + " can cover " + image2.getBackupId());
+    return true;
+  }
+
+  /**
+   * Check whether backup image set could cover a backup image or not.
+   * @param fullImages The backup image set
+   * @param image The target backup image
+   * @return true if fullImages can cover image, otherwise false
+   */
+  public static boolean canCoverImage(ArrayList<BackupImage> fullImages, BackupImage image) {
+    // fullImages can cover image only when the following conditions are satisfied:
+    // - each image of fullImages must not be an incremental image;
+    // - each image of fullImages must be taken after image has been taken;
+    // - sum table set of fullImages must cover the table set of image.
+    for (BackupImage image1 : fullImages) {
+      if (image1.getType().equals(BackupRestoreConstants.BACKUP_TYPE_INCR)) {
+        return false;
+      }
+      if (image1.getStartTs() < image.getStartTs()) {
+        return false;
+      }
+    }
+
+    ArrayList<String> image1TableSet = new ArrayList<String>();
+    for (BackupImage image1 : fullImages) {
+      String[] tableSet = image1.getTableSet().split(";");
+      for (String table : tableSet) {
+        image1TableSet.add(table);
+      }
+    }
+    ArrayList<String> image2TableSet = new ArrayList<String>();
+    String[] tableSet = image.getTableSet().split(";");
+    for (String table : tableSet) {
+      image2TableSet.add(table);
+    }
+    
+    for (int i = 0; i < image2TableSet.size(); i++) {
+      if (!image1TableSet.contains(image2TableSet.get(i))) {
+        return false;
+      }
+    }
+
+    LOG.debug("Full image set can cover image " + image.getBackupId());
+    return true;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
new file mode 100644
index 0000000..9610b27
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * BackupRestoreConstants holds the HBase backup and restore constants.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public final class BackupRestoreConstants {
+
+  // constants for znode data keys in backup znode
+  public static final String BACKUP_PROGRESS = "progress";
+  public static final String BACKUP_START_TIME = "startTs";
+  public static final String BACKUP_INPROGRESS_PHASE = "phase";
+  public static final String BACKUP_COMPLETE_TIME = "completeTs";
+  public static final String BACKUP_FAIL_TIME = "failedTs";
+  public static final String BACKUP_FAIL_PHASE = "failedphase";
+  public static final String BACKUP_FAIL_MSG = "failedmessage";
+  public static final String BACKUP_ROOT_PATH = "targetRootDir";
+  public static final String BACKUP_REQUEST_TABLE_LIST = "tablelist";
+  public static final String BACKUP_REQUEST_TYPE = "type";
+  public static final String BACKUP_BYTES_COPIED = "bytescopied";
+  public static final String BACKUP_ANCESTORS = "ancestors";
+  public static final String BACKUP_EXISTINGSNAPSHOT = "snapshot";
+
+  public static final String BACKUP_TYPE_FULL = "full";
+  public static final String BACKUP_TYPE_INCR = "incremental";
+
+  // delimiter in tablename list in restore command
+  public static final String TABLENAME_DELIMITER_IN_COMMAND = ",";
+
+  // delimiter in znode data
+  public static final String ZNODE_DATA_DELIMITER = ",";
+
+  public static final String CONF_STAGING_ROOT = "snapshot.export.staging.root";
+
+  public static final String BACKUPID_PREFIX = "backup_";
+
+  public static enum BACKUP_COMMAND {
+    CREATE, CANCEL, DELETE, DESCRIBE, HISTORY, STATUS, CONVERT, MERGE, STOP, SHOW, HELP,
+  }
+
+  private BackupRestoreConstants() {
+    // Can't be instantiated with this ctor.
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreServiceFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreServiceFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreServiceFactory.java
new file mode 100644
index 0000000..1c38cf4
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreServiceFactory.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupCopyService;
+import org.apache.hadoop.hbase.backup.mapreduce.MapReduceRestoreService;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.util.ReflectionUtils;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class BackupRestoreServiceFactory {
+
+  public final static String HBASE_INCR_RESTORE_IMPL_CLASS = "hbase.incremental.restore.class";
+  public final static String HBASE_BACKUP_COPY_IMPL_CLASS = "hbase.backup.copy.class";
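+
+  // Usage sketch (illustrative): a custom implementation can be plugged in via
+  // configuration before asking the factory for a service instance, e.g.
+  //   conf.set(HBASE_INCR_RESTORE_IMPL_CLASS, MyRestoreService.class.getName());
+  //   IncrementalRestoreService restore =
+  //       BackupRestoreServiceFactory.getIncrementalRestoreService(conf);
+  // where MyRestoreService is a hypothetical IncrementalRestoreService implementation.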
+
+  private BackupRestoreServiceFactory() {
+    throw new AssertionError("Instantiating utility class...");
+  }
+  
+  /**
+   * Gets the incremental restore service.
+   * @param conf - configuration
+   * @return incremental restore service instance
+   */
+  public static IncrementalRestoreService getIncrementalRestoreService(Configuration conf) {
+    Class<? extends IncrementalRestoreService> cls =
+        conf.getClass(HBASE_INCR_RESTORE_IMPL_CLASS, MapReduceRestoreService.class,
+          IncrementalRestoreService.class);
+    return ReflectionUtils.newInstance(cls, conf);
+  }
+  
+  /**
+   * Gets the backup copy service.
+   * @param conf - configuration
+   * @return backup copy service
+   */
+  public static BackupCopyService getBackupCopyService(Configuration conf) {
+    Class<? extends BackupCopyService> cls =
+        conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS, MapReduceBackupCopyService.class,
+          BackupCopyService.class);
+    return ReflectionUtils.newInstance(cls, conf);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/de69f0df/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java
new file mode 100644
index 0000000..c26e1d4
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.Serializable;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Backup status and related information for a table.
+ * At this moment only the target directory and snapshot name are encapsulated here;
+ * progress, bytes copied, phase, etc. will be added in a future JIRA.
+ */
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupStatus implements Serializable {
+
+  private static final long serialVersionUID = -5968397963548535982L;
+
+  // table name for backup
+  private String table;
+
+  // target directory of the backup image for this table
+  private String targetDir;
+
+  // snapshot name for offline/online snapshot
+  private String snapshotName = null;
+
+  public BackupStatus(String table, String targetRootDir, String backupId) {
+    this.table = table;
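+    // target dir follows the image layout noted in BackupManifest:
+    // backupRootDir/namespace/table/backupId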
+    this.targetDir = HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table);
+  }
+
+  public String getSnapshotName() {
+    return snapshotName;
+  }
+
+  public void setSnapshotName(String snapshotName) {
+    this.snapshotName = snapshotName;
+  }
+
+  public String getTargetDir() {
+    return targetDir;
+  }
+
+  public String getTable() {
+    return table;
+  }
+}