Posted to commits@hbase.apache.org by st...@apache.org on 2017/12/02 01:22:52 UTC

[09/10] hbase git commit: HBASE-19407 [branch-2] Remove backup/restore

http://git-wip-us.apache.org/repos/asf/hbase/blob/79ac70ac/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
deleted file mode 100644
index f5f879a..0000000
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import java.io.IOException;
-import java.util.HashMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupManifest;
-import org.apache.yetus.audience.InterfaceAudience;
-
-/**
- * View to an on-disk Backup Image FileSystem. Provides the set of methods necessary to interact
- * with the on-disk Backup Image data.
- */
-@InterfaceAudience.Private
-public class HBackupFileSystem {
-  public static final Log LOG = LogFactory.getLog(HBackupFileSystem.class);
-
-  /**
-   * This is a utility class.
-   */
-  private HBackupFileSystem() {
-  }
-
-  /**
-   * Given the backup root dir, backup id and the table name, return the backup image location,
-   * which is also where the backup manifest file is. The return value looks like:
-   * "hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/", where
-   * "hdfs://backup.hbase.org:9000/user/biadmin/backup" is a backup root directory
-   * @param backupRootDir backup root directory
-   * @param backupId backup id
-   * @param tableName table name
-   * @return backupPath String for the particular table
-   */
-  public static String
-      getTableBackupDir(String backupRootDir, String backupId, TableName tableName) {
-    return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
-        + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString()
-        + Path.SEPARATOR;
-  }
-
-  public static String getTableBackupDataDir(String backupRootDir, String backupId,
-      TableName tableName) {
-    // getTableBackupDir() already ends with a path separator
-    return getTableBackupDir(backupRootDir, backupId, tableName) + "data";
-  }
-
-  public static Path getBackupPath(String backupRootDir, String backupId) {
-    return new Path(backupRootDir + Path.SEPARATOR + backupId);
-  }
-
-  /**
-   * Given the backup root dir, backup id and the table name, return the backup image location,
-   * which is also where the backup manifest file is. The return value looks like:
-   * "hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/", where
-   * "hdfs://backup.hbase.org:9000/user/biadmin/backup" is a backup root directory
-   * @param backupRootPath backup root path
-   * @param tableName table name
-   * @param backupId backup Id
-   * @return backupPath for the particular table
-   */
-  public static Path getTableBackupPath(TableName tableName, Path backupRootPath, String backupId) {
-    return new Path(getTableBackupDir(backupRootPath.toString(), backupId, tableName));
-  }
-
-  /**
-   * Given the backup root dir and the backup id, return the log file location for an incremental
-   * backup.
-   * @param backupRootDir backup root directory
-   * @param backupId backup id
-   * @return logBackupDir: ".../user/biadmin/backup/backup_1396650096738/WALs"
-   */
-  public static String getLogBackupDir(String backupRootDir, String backupId) {
-    return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
-        + HConstants.HREGION_LOGDIR_NAME;
-  }
-
-  public static Path getLogBackupPath(String backupRootDir, String backupId) {
-    return new Path(getLogBackupDir(backupRootDir, backupId));
-  }
-
-  // TODO we do not keep WAL files anymore
-  // Move manifest file to another place
-  private static Path getManifestPath(Configuration conf, Path backupRootPath, String backupId)
-      throws IOException {
-    Path manifestPath = null;
-
-    FileSystem fs = backupRootPath.getFileSystem(conf);
-    manifestPath =
-        new Path(getBackupPath(backupRootPath.toString(), backupId) + Path.SEPARATOR
-            + BackupManifest.MANIFEST_FILE_NAME);
-    if (!fs.exists(manifestPath)) {
-      String errorMsg =
-          "Could not find backup manifest " + BackupManifest.MANIFEST_FILE_NAME + " for "
-              + backupId + ". File " + manifestPath + " does not exist. Did " + backupId
-              + " correspond to a previously taken backup?";
-      throw new IOException(errorMsg);
-    }
-    return manifestPath;
-  }
-
-  public static BackupManifest
-      getManifest(Configuration conf, Path backupRootPath, String backupId) throws IOException {
-    BackupManifest manifest =
-        new BackupManifest(conf, getManifestPath(conf, backupRootPath, backupId));
-    return manifest;
-  }
-
-  /**
-   * Check whether the backup image path exists and whether the manifest file is present in it.
-   * @param backupManifestMap If all the manifests are found, then they are put into this map
-   * @param tableArray the tables involved
-   * @throws IOException exception
-   */
-  public static void checkImageManifestExist(HashMap<TableName, BackupManifest> backupManifestMap,
-      TableName[] tableArray, Configuration conf, Path backupRootPath, String backupId)
-      throws IOException {
-    for (TableName tableName : tableArray) {
-      BackupManifest manifest = getManifest(conf, backupRootPath, backupId);
-      backupManifestMap.put(tableName, manifest);
-    }
-  }
-}
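
For reference, a minimal sketch of the path layout the removed HBackupFileSystem helpers
produced; the root directory, backup id, and table below are made-up values taken from the
javadoc example above:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.TableName;

    public class BackupPathSketch {
      public static void main(String[] args) {
        // Hypothetical values, mirroring the javadoc example
        String backupRootDir = "hdfs://backup.hbase.org:9000/user/biadmin/backup";
        String backupId = "backup_1396650096738";
        TableName table = TableName.valueOf("default", "t1_dn");

        // Same layout getTableBackupDir() produced: <root>/<backupId>/<namespace>/<qualifier>/
        String tableBackupDir = backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
            + table.getNamespaceAsString() + Path.SEPARATOR + table.getQualifierAsString()
            + Path.SEPARATOR;
        System.out.println(tableBackupDir);
        // hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/
      }
    }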

http://git-wip-us.apache.org/repos/asf/hbase/blob/79ac70ac/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/LogUtils.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/LogUtils.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/LogUtils.java
deleted file mode 100644
index 3f62338..0000000
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/LogUtils.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup;
-
-import org.apache.commons.logging.Log;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-
-/**
- * Utility class for disabling ZooKeeper and HBase client logging.
- */
-@InterfaceAudience.Private
-final class LogUtils {
-
-  private LogUtils() {
-  }
-
-  /**
-   * Disables ZooKeeper and HBase client logging.
-   * @param log
-   */
-  static void disableZkAndClientLoggers(Log log) {
-    // disable ZooKeeper logging so it does not mess up command output
-    Logger zkLogger = Logger.getLogger("org.apache.zookeeper");
-    zkLogger.setLevel(Level.OFF);
-    // disable HBase ZooKeeper tool logging so it does not mess up command output
-    Logger hbaseZkLogger = Logger.getLogger("org.apache.hadoop.hbase.zookeeper");
-    hbaseZkLogger.setLevel(Level.OFF);
-    // disable HBase client logging so it does not mess up command output
-    Logger hbaseClientLogger = Logger.getLogger("org.apache.hadoop.hbase.client");
-    hbaseClientLogger.setLevel(Level.OFF);
-  }
-}
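
A standalone sketch of the log4j 1.x pattern the removed LogUtils relied on; the logger
names are the real ZooKeeper/HBase ones, and Level.OFF silences them completely:

    import org.apache.log4j.Level;
    import org.apache.log4j.Logger;

    public class QuietLoggersSketch {
      public static void main(String[] args) {
        // Silence ZooKeeper and HBase client loggers so command output stays readable
        Logger.getLogger("org.apache.zookeeper").setLevel(Level.OFF);
        Logger.getLogger("org.apache.hadoop.hbase.zookeeper").setLevel(Level.OFF);
        Logger.getLogger("org.apache.hadoop.hbase.client").setLevel(Level.OFF);
      }
    }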

http://git-wip-us.apache.org/repos/asf/hbase/blob/79ac70ac/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
deleted file mode 100644
index b99246b..0000000
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
+++ /dev/null
@@ -1,275 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup;
-
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_CHECK;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_CHECK_DESC;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_OVERWRITE;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_OVERWRITE_DESC;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_RESTORE_DESC;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_LIST_DESC;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_MAPPING;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_MAPPING_DESC;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME;
-import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME_RESTORE_DESC;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.List;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
-import org.apache.hadoop.hbase.backup.impl.BackupManager;
-import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.util.AbstractHBaseTool;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-
-/**
- * Command-line entry point for the restore operation.
- */
-@InterfaceAudience.Private
-public class RestoreDriver extends AbstractHBaseTool {
-
-  private static final Log LOG = LogFactory.getLog(RestoreDriver.class);
-  private CommandLine cmd;
-
-  private static final String USAGE_STRING =
-      "Usage: hbase restore <backup_path> <backup_id> [options]\n"
-          + "  backup_path     Path to a backup destination root\n"
-          + "  backup_id       Backup image ID to restore\n"
-          + "  table(s)        Comma-separated list of tables to restore\n";
-
-  private static final String USAGE_FOOTER = "";
-
-  protected RestoreDriver() throws IOException {
-    init();
-  }
-
-  protected void init() throws IOException {
-    // disable irrelevant loggers to avoid messing up command output
-    LogUtils.disableZkAndClientLoggers(LOG);
-  }
-
-  private int parseAndRun(String[] args) throws IOException {
-    // Check if backup is enabled
-    if (!BackupManager.isBackupEnabled(getConf())) {
-      System.err.println(BackupRestoreConstants.ENABLE_BACKUP);
-      return -1;
-    }
-
-    System.out.println(BackupRestoreConstants.VERIFY_BACKUP);
-
-    // enable debug logging
-    Logger backupClientLogger = Logger.getLogger("org.apache.hadoop.hbase.backup");
-    if (cmd.hasOption(OPTION_DEBUG)) {
-      backupClientLogger.setLevel(Level.DEBUG);
-    }
-
-    // whether to overwrite the existing table if any; false by default
-    boolean overwrite = cmd.hasOption(OPTION_OVERWRITE);
-    if (overwrite) {
-      LOG.debug("Found -overwrite option in restore command, "
-          + "will overwrite to existing table if any in the restore target");
-    }
-
-    // whether to only check the dependencies, false by default
-    boolean check = cmd.hasOption(OPTION_CHECK);
-    if (check) {
-      LOG.debug("Found -check option in restore command, "
-          + "will check and verify the dependencies");
-    }
-
-    if (cmd.hasOption(OPTION_SET) && cmd.hasOption(OPTION_TABLE)) {
-      System.err.println("Options -s and -t are mutaully exclusive,"+
-          " you can not specify both of them.");
-      printToolUsage();
-      return -1;
-    }
-
-    if (!cmd.hasOption(OPTION_SET) && !cmd.hasOption(OPTION_TABLE)) {
-      System.err.println("You have to specify either set name or table list to restore");
-      printToolUsage();
-      return -1;
-    }
-
-    if (cmd.hasOption(OPTION_YARN_QUEUE_NAME)) {
-      String queueName = cmd.getOptionValue(OPTION_YARN_QUEUE_NAME);
-      // Set system property value for MR job
-      System.setProperty("mapreduce.job.queuename", queueName);
-    }
-
-    // parse main restore command options
-    String[] remainArgs = cmd.getArgs();
-    if (remainArgs.length != 2) {
-      printToolUsage();
-      return -1;
-    }
-
-    String backupRootDir = remainArgs[0];
-    String backupId = remainArgs[1];
-    String tables = null;
-    String tableMapping =
-        cmd.hasOption(OPTION_TABLE_MAPPING) ? cmd.getOptionValue(OPTION_TABLE_MAPPING) : null;
-    try (final Connection conn = ConnectionFactory.createConnection(conf);
-        BackupAdmin client = new BackupAdminImpl(conn);) {
-      // Check backup set
-      if (cmd.hasOption(OPTION_SET)) {
-        String setName = cmd.getOptionValue(OPTION_SET);
-        try {
-          tables = getTablesForSet(conn, setName, conf);
-        } catch (IOException e) {
-          System.out.println("ERROR: " + e.getMessage() + " for setName=" + setName);
-          printToolUsage();
-          return -2;
-        }
-        if (tables == null) {
-          System.out.println("ERROR: Backup set '" + setName
-              + "' is either empty or does not exist");
-          printToolUsage();
-          return -3;
-        }
-      } else {
-        tables = cmd.getOptionValue(OPTION_TABLE);
-      }
-
-      TableName[] sTableArray = BackupUtils.parseTableNames(tables);
-      TableName[] tTableArray = BackupUtils.parseTableNames(tableMapping);
-
-      if (sTableArray != null && tTableArray != null &&
-          (sTableArray.length != tTableArray.length)) {
-        System.out.println("ERROR: table mapping mismatch: " + tables + " : " + tableMapping);
-        printToolUsage();
-        return -4;
-      }
-
-      client.restore(BackupUtils.createRestoreRequest(backupRootDir, backupId, check,
-        sTableArray, tTableArray, overwrite));
-    } catch (Exception e) {
-      e.printStackTrace();
-      return -5;
-    }
-    return 0;
-  }
-
-  private String getTablesForSet(Connection conn, String name, Configuration conf)
-      throws IOException {
-    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
-      List<TableName> tables = table.describeBackupSet(name);
-      if (tables == null) return null;
-      return StringUtils.join(tables, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND);
-    }
-  }
-
-  @Override
-  protected void addOptions() {
-    // define supported options
-    addOptNoArg(OPTION_OVERWRITE, OPTION_OVERWRITE_DESC);
-    addOptNoArg(OPTION_CHECK, OPTION_CHECK_DESC);
-    addOptNoArg(OPTION_DEBUG, OPTION_DEBUG_DESC);
-    addOptWithArg(OPTION_SET, OPTION_SET_RESTORE_DESC);
-    addOptWithArg(OPTION_TABLE, OPTION_TABLE_LIST_DESC);
-    addOptWithArg(OPTION_TABLE_MAPPING, OPTION_TABLE_MAPPING_DESC);
-    addOptWithArg(OPTION_YARN_QUEUE_NAME, OPTION_YARN_QUEUE_NAME_RESTORE_DESC);
-
-  }
-
-  @Override
-  protected void processOptions(CommandLine cmd) {
-    this.cmd = cmd;
-  }
-
-  @Override
-  protected int doWork() throws Exception {
-    return parseAndRun(cmd.getArgs());
-  }
-
-  public static void main(String[] args) throws Exception {
-    Configuration conf = HBaseConfiguration.create();
-    Path hbasedir = FSUtils.getRootDir(conf);
-    URI defaultFs = hbasedir.getFileSystem(conf).getUri();
-    FSUtils.setFsDefault(conf, new Path(defaultFs));
-    int ret = ToolRunner.run(conf, new RestoreDriver(), args);
-    System.exit(ret);
-  }
-
-  @Override
-  public int run(String[] args) throws IOException {
-    if (conf == null) {
-      LOG.error("Tool configuration is not initialized");
-      throw new NullPointerException("conf");
-    }
-
-    CommandLine cmd;
-    try {
-      // parse the command line arguments
-      cmd = parseArgs(args);
-      cmdLineArgs = args;
-    } catch (Exception e) {
-      System.out.println("Error when parsing command-line arguments: " + e.getMessage());
-      printToolUsage();
-      return EXIT_FAILURE;
-    }
-
-    if (cmd.hasOption(SHORT_HELP_OPTION) || cmd.hasOption(LONG_HELP_OPTION)) {
-      printToolUsage();
-      return EXIT_FAILURE;
-    }
-
-    processOptions(cmd);
-
-    int ret = EXIT_FAILURE;
-    try {
-      ret = doWork();
-    } catch (Exception e) {
-      LOG.error("Error running command-line tool", e);
-      return EXIT_FAILURE;
-    }
-    return ret;
-  }
-
-  protected void printToolUsage() throws IOException {
-    System.out.println(USAGE_STRING);
-    HelpFormatter helpFormatter = new HelpFormatter();
-    helpFormatter.setLeftPadding(2);
-    helpFormatter.setDescPadding(8);
-    helpFormatter.setWidth(100);
-    helpFormatter.setSyntaxPrefix("Options:");
-    helpFormatter.printHelp(" ", null, options, USAGE_FOOTER);
-  }
-}
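
A minimal sketch of driving the removed tool programmatically, assuming the pre-removal API.
The class sits in the driver's package because the constructor is protected; the backup path,
backup id, and table name are hypothetical:

    package org.apache.hadoop.hbase.backup;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.util.ToolRunner;

    public class RestoreLauncherSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Equivalent to: hbase restore <backup_path> <backup_id> -t <tables>
        int ret = ToolRunner.run(conf, new RestoreDriver(), new String[] {
            "hdfs://backup.hbase.org:9000/user/biadmin/backup", // backup_path (hypothetical)
            "backup_1396650096738",                             // backup_id (hypothetical)
            "-t", "default:t1_dn"                               // table list
        });
        System.exit(ret);
      }
    }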

http://git-wip-us.apache.org/repos/asf/hbase/blob/79ac70ac/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java
deleted file mode 100644
index ca57e59..0000000
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
-
-/**
- * Restore operation job interface. A concrete implementation is provided by the backup
- * provider; see {@link BackupRestoreFactory}.
- */
-
-@InterfaceAudience.Private
-public interface RestoreJob extends Configurable {
-
-  /**
-   * Run restore operation
-   * @param dirPaths path array of WAL log directories
-   * @param fromTables from tables
-   * @param toTables to tables
-   * @param fullBackupRestore whether this is a full-backup restore (vs. incremental)
-   * @throws IOException
-   */
-  void run(Path[] dirPaths, TableName[] fromTables, TableName[] toTables,
-      boolean fullBackupRestore) throws IOException;
-}
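
Since RestoreJob extends Configurable, an implementation must supply setConf()/getConf()
alongside run(). A minimal no-op sketch, assuming the pre-removal API; LoggingRestoreJob is
a made-up name, and a real provider would replay the backup images in run():

    package org.apache.hadoop.hbase.backup;

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.TableName;

    public class LoggingRestoreJob implements RestoreJob {
      private Configuration conf;

      @Override
      public void run(Path[] dirPaths, TableName[] fromTables, TableName[] toTables,
          boolean fullBackupRestore) throws IOException {
        // A real implementation would replay the images under dirPaths here
        System.out.println("Would restore " + fromTables.length + " table(s), full="
            + fullBackupRestore);
      }

      @Override
      public void setConf(Configuration conf) {
        this.conf = conf;
      }

      @Override
      public Configuration getConf() {
        return conf;
      }
    }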

http://git-wip-us.apache.org/repos/asf/hbase/blob/79ac70ac/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java
deleted file mode 100644
index 5a0a7d4..0000000
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup;
-
-import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
-
-/**
- * POJO class for restore request
- */
-@InterfaceAudience.Private
-public class RestoreRequest {
-
-  public static class Builder {
-    RestoreRequest request;
-
-    public Builder() {
-      request = new RestoreRequest();
-    }
-
-    public Builder withBackupRootDir(String backupRootDir) {
-      request.setBackupRootDir(backupRootDir);
-      return this;
-    }
-
-    public Builder withBackupId(String backupId) {
-      request.setBackupId(backupId);
-      return this;
-    }
-
-    public Builder withCheck(boolean check) {
-      request.setCheck(check);
-      return this;
-    }
-
-    public Builder withFromTables(TableName[] fromTables) {
-      request.setFromTables(fromTables);
-      return this;
-    }
-
-    public Builder withToTables(TableName[] toTables) {
-      request.setToTables(toTables);
-      return this;
-    }
-
-    public Builder withOvewrite(boolean overwrite) {
-      request.setOverwrite(overwrite);
-      return this;
-    }
-
-
-    public RestoreRequest build() {
-      return request;
-    }
-  }
-
-  private String backupRootDir;
-  private String backupId;
-  private boolean check = false;
-  private TableName[] fromTables;
-  private TableName[] toTables;
-  private boolean overwrite = false;
-
-  private RestoreRequest() {
-  }
-
-  public String getBackupRootDir() {
-    return backupRootDir;
-  }
-
-  private RestoreRequest setBackupRootDir(String backupRootDir) {
-    this.backupRootDir = backupRootDir;
-    return this;
-  }
-
-  public String getBackupId() {
-    return backupId;
-  }
-
-  private RestoreRequest setBackupId(String backupId) {
-    this.backupId = backupId;
-    return this;
-  }
-
-  public boolean isCheck() {
-    return check;
-  }
-
-  private RestoreRequest setCheck(boolean check) {
-    this.check = check;
-    return this;
-  }
-
-  public TableName[] getFromTables() {
-    return fromTables;
-  }
-
-  private RestoreRequest setFromTables(TableName[] fromTables) {
-    this.fromTables = fromTables;
-    return this;
-  }
-
-  public TableName[] getToTables() {
-    return toTables;
-  }
-
-  private RestoreRequest setToTables(TableName[] toTables) {
-    this.toTables = toTables;
-    return this;
-  }
-
-  public boolean isOverwrite() {
-    return overwrite;
-  }
-
-  private RestoreRequest setOverwrite(boolean overwrite) {
-    this.overwrite = overwrite;
-    return this;
-  }
-}
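
Builder usage, for reference; all values are hypothetical. Note that withOvewrite() keeps its
original, misspelled name, since renaming it would change the API:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.RestoreRequest;

    public class RestoreRequestSketch {
      public static void main(String[] args) {
        // Hypothetical root dir, backup id, and tables
        RestoreRequest request = new RestoreRequest.Builder()
            .withBackupRootDir("hdfs://backup.hbase.org:9000/user/biadmin/backup")
            .withBackupId("backup_1396650096738")
            .withCheck(false)
            .withFromTables(new TableName[] { TableName.valueOf("t1") })
            .withToTables(new TableName[] { TableName.valueOf("t1_restored") })
            .withOvewrite(true) // original API spelling
            .build();
        System.out.println("Restore " + request.getBackupId()
            + " overwrite=" + request.isOverwrite());
      }
    }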

http://git-wip-us.apache.org/repos/asf/hbase/blob/79ac70ac/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
deleted file mode 100644
index 8a60e67..0000000
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
+++ /dev/null
@@ -1,743 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup.impl;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupAdmin;
-import org.apache.hadoop.hbase.backup.BackupClientFactory;
-import org.apache.hadoop.hbase.backup.BackupInfo;
-import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
-import org.apache.hadoop.hbase.backup.BackupMergeJob;
-import org.apache.hadoop.hbase.backup.BackupRequest;
-import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
-import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
-import org.apache.hadoop.hbase.backup.BackupType;
-import org.apache.hadoop.hbase.backup.HBackupFileSystem;
-import org.apache.hadoop.hbase.backup.RestoreRequest;
-import org.apache.hadoop.hbase.backup.util.BackupSet;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-
-@InterfaceAudience.Private
-public class BackupAdminImpl implements BackupAdmin {
-  public final static String CHECK_OK = "Checking backup images: OK";
-  public final static String CHECK_FAILED =
-      "Checking backup images: Failed. Some dependencies are missing for restore";
-  private static final Log LOG = LogFactory.getLog(BackupAdminImpl.class);
-
-  private final Connection conn;
-
-  public BackupAdminImpl(Connection conn) {
-    this.conn = conn;
-  }
-
-  @Override
-  public void close() throws IOException {
-  }
-
-  @Override
-  public BackupInfo getBackupInfo(String backupId) throws IOException {
-    BackupInfo backupInfo = null;
-    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
-      if (backupId == null) {
-        ArrayList<BackupInfo> recentSessions = table.getBackupInfos(BackupState.RUNNING);
-        if (recentSessions.isEmpty()) {
-          LOG.warn("No ongoing sessions found.");
-          return null;
-        }
-        // else show status for the ongoing session;
-        // there can be at most one
-        return recentSessions.get(0);
-      } else {
-        backupInfo = table.readBackupInfo(backupId);
-        return backupInfo;
-      }
-    }
-  }
-
-  @Override
-  public int deleteBackups(String[] backupIds) throws IOException {
-
-    int totalDeleted = 0;
-    Map<String, HashSet<TableName>> allTablesMap = new HashMap<String, HashSet<TableName>>();
-
-    boolean deleteSessionStarted = false;
-    boolean snapshotDone = false;
-    try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
-
-      // Step 1: Make sure no active backup session is running,
-      // by using the startBackupExclusiveOperation API.
-      // If there is an active session in progress, an exception will be thrown
-      try {
-        sysTable.startBackupExclusiveOperation();
-        deleteSessionStarted = true;
-      } catch (IOException e) {
-        LOG.warn("You can not run delete command while active backup session is in progress. \n"
-            + "If there is no active backup session running, run backup repair utility to restore \n"
-            + "backup system integrity.");
-        return -1;
-      }
-
-      // Step 2: Make sure there is no failed session
-      List<BackupInfo> list = sysTable.getBackupInfos(BackupState.RUNNING);
-      if (list.size() != 0) {
-        // Failed sessions found
-        LOG.warn("Failed backup session found. Run backup repair tool first.");
-        return -1;
-      }
-
-      // Step 3: Record delete session
-      sysTable.startDeleteOperation(backupIds);
-      // Step 4: Snapshot backup system table
-      if (!BackupSystemTable.snapshotExists(conn)) {
-        BackupSystemTable.snapshot(conn);
-      } else {
-        LOG.warn("Backup system table snapshot exists");
-      }
-      snapshotDone = true;
-      try {
-        for (int i = 0; i < backupIds.length; i++) {
-          BackupInfo info = sysTable.readBackupInfo(backupIds[i]);
-          if (info != null) {
-            String rootDir = info.getBackupRootDir();
-            HashSet<TableName> allTables = allTablesMap.get(rootDir);
-            if (allTables == null) {
-              allTables = new HashSet<TableName>();
-              allTablesMap.put(rootDir, allTables);
-            }
-            allTables.addAll(info.getTableNames());
-            totalDeleted += deleteBackup(backupIds[i], sysTable);
-          }
-        }
-        finalizeDelete(allTablesMap, sysTable);
-        // Finish
-        sysTable.finishDeleteOperation();
-        // delete snapshot
-        BackupSystemTable.deleteSnapshot(conn);
-      } catch (IOException e) {
-        // Fail delete operation
-        // Step 1
-        if (snapshotDone) {
-          if (BackupSystemTable.snapshotExists(conn)) {
-            BackupSystemTable.restoreFromSnapshot(conn);
-            // delete snapshot
-            BackupSystemTable.deleteSnapshot(conn);
-            // We still have record with unfinished delete operation
-            LOG.error("Delete operation failed, please run backup repair utility to restore "
-                + "backup system integrity", e);
-            throw e;
-          } else {
-            LOG.warn("Delete operation succeeded, there were some errors: ", e);
-          }
-        }
-
-      } finally {
-        if (deleteSessionStarted) {
-          sysTable.finishBackupExclusiveOperation();
-        }
-      }
-    }
-    return totalDeleted;
-  }
-
-  /**
-   * Updates incremental backup set for every backupRoot
-   * @param tablesMap map [backupRoot: Set<TableName>]
-   * @param table backup system table
-   * @throws IOException
-   */
-
-  private void finalizeDelete(Map<String, HashSet<TableName>> tablesMap, BackupSystemTable table)
-      throws IOException {
-    for (String backupRoot : tablesMap.keySet()) {
-      Set<TableName> incrTableSet = table.getIncrementalBackupTableSet(backupRoot);
-      Map<TableName, ArrayList<BackupInfo>> tableMap =
-          table.getBackupHistoryForTableSet(incrTableSet, backupRoot);
-      for (Map.Entry<TableName, ArrayList<BackupInfo>> entry : tableMap.entrySet()) {
-        if (entry.getValue() == null) {
-          // No more backups for a table
-          incrTableSet.remove(entry.getKey());
-        }
-      }
-      if (!incrTableSet.isEmpty()) {
-        table.addIncrementalBackupTableSet(incrTableSet, backupRoot);
-      } else { // empty
-        table.deleteIncrementalBackupTableSet(backupRoot);
-      }
-    }
-  }
-
-  /**
-   * Delete single backup and all related backups <br>
-   * Algorithm:<br>
-   * Backup type: FULL or INCREMENTAL <br>
-   * Is this last backup session for table T: YES or NO <br>
-   * For every table T from table list 'tables':<br>
-   * if(FULL, YES) deletes only physical data (PD) <br>
-   * if(FULL, NO), deletes PD, scans all newer backups and removes T from backupInfo,<br>
-   * until we either reach the most recent backup for T in the system or FULL backup<br>
-   * which includes T<br>
-   * if(INCREMENTAL, YES) deletes only physical data (PD); if(INCREMENTAL, NO) deletes physical
-   * data and, for table T, scans all backup images between the last<br>
-   * FULL backup, which is older than the backup being deleted, and the next FULL backup (if it
-   * exists)<br>
-   * or the last one for a particular table T, and removes T from the list of backup tables.
-   * @param backupId backup id
-   * @param sysTable backup system table
-   * @return total number of deleted backup images
-   * @throws IOException
-   */
-  private int deleteBackup(String backupId, BackupSystemTable sysTable) throws IOException {
-
-    BackupInfo backupInfo = sysTable.readBackupInfo(backupId);
-
-    int totalDeleted = 0;
-    if (backupInfo != null) {
-      LOG.info("Deleting backup " + backupInfo.getBackupId() + " ...");
-      // Step 1: clean up data for backup session (idempotent)
-      BackupUtils.cleanupBackupData(backupInfo, conn.getConfiguration());
-      // List of tables in this backup;
-      List<TableName> tables = backupInfo.getTableNames();
-      long startTime = backupInfo.getStartTs();
-      for (TableName tn : tables) {
-        boolean isLastBackupSession = isLastBackupSession(sysTable, tn, startTime);
-        if (isLastBackupSession) {
-          continue;
-        }
-        // else
-        List<BackupInfo> affectedBackups = getAffectedBackupSessions(backupInfo, tn, sysTable);
-        for (BackupInfo info : affectedBackups) {
-          if (info.equals(backupInfo)) {
-            continue;
-          }
-          removeTableFromBackupImage(info, tn, sysTable);
-        }
-      }
-      Map<byte[], String> map = sysTable.readBulkLoadedFiles(backupId);
-      FileSystem fs = FileSystem.get(conn.getConfiguration());
-      boolean success = true;
-      int numDeleted = 0;
-      for (String f : map.values()) {
-        Path p = new Path(f);
-        try {
-          LOG.debug("Delete backup info " + p + " for " + backupInfo.getBackupId());
-          if (!fs.delete(p)) {
-            if (fs.exists(p)) {
-              LOG.warn(f + " was not deleted");
-              success = false;
-            }
-          } else {
-            numDeleted++;
-          }
-        } catch (IOException ioe) {
-          LOG.warn(f + " was not deleted", ioe);
-          success = false;
-        }
-      }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(numDeleted + " bulk loaded files out of " + map.size() + " were deleted");
-      }
-      if (success) {
-        sysTable.deleteBulkLoadedFiles(map);
-      }
-
-      sysTable.deleteBackupInfo(backupInfo.getBackupId());
-      LOG.info("Delete backup " + backupInfo.getBackupId() + " completed.");
-      totalDeleted++;
-    } else {
-      LOG.warn("Delete backup failed: no information found for backupID=" + backupId);
-    }
-    return totalDeleted;
-  }
-
-  private void
-      removeTableFromBackupImage(BackupInfo info, TableName tn, BackupSystemTable sysTable)
-          throws IOException {
-    List<TableName> tables = info.getTableNames();
-    LOG.debug("Remove " + tn + " from " + info.getBackupId() + " tables="
-        + info.getTableListAsString());
-    if (tables.contains(tn)) {
-      tables.remove(tn);
-
-      if (tables.isEmpty()) {
-        LOG.debug("Delete backup info " + info.getBackupId());
-
-        sysTable.deleteBackupInfo(info.getBackupId());
-        // Idempotent operation
-        BackupUtils.cleanupBackupData(info, conn.getConfiguration());
-      } else {
-        info.setTables(tables);
-        sysTable.updateBackupInfo(info);
-        // Now, clean up directory for table (idempotent)
-        cleanupBackupDir(info, tn, conn.getConfiguration());
-      }
-    }
-  }
-
-  private List<BackupInfo> getAffectedBackupSessions(BackupInfo backupInfo, TableName tn,
-      BackupSystemTable table) throws IOException {
-    LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn);
-    long ts = backupInfo.getStartTs();
-    List<BackupInfo> list = new ArrayList<BackupInfo>();
-    List<BackupInfo> history = table.getBackupHistory(backupInfo.getBackupRootDir());
-    // Scan from most recent to backupInfo
-    // break when backupInfo reached
-    for (BackupInfo info : history) {
-      if (info.getStartTs() == ts) {
-        break;
-      }
-      List<TableName> tables = info.getTableNames();
-      if (tables.contains(tn)) {
-        BackupType bt = info.getType();
-        if (bt == BackupType.FULL) {
-          // Clear list if we encounter FULL backup
-          list.clear();
-        } else {
-          LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn
-              + " added " + info.getBackupId() + " tables=" + info.getTableListAsString());
-          list.add(info);
-        }
-      }
-    }
-    return list;
-  }
-
-  /**
-   * Clean up the data at the target directory.
-   * @throws IOException
-   */
-  private void cleanupBackupDir(BackupInfo backupInfo, TableName table, Configuration conf)
-      throws IOException {
-    try {
-      // clean up the data at target directory
-      String targetDir = backupInfo.getBackupRootDir();
-      if (targetDir == null) {
-        LOG.warn("No target directory specified for " + backupInfo.getBackupId());
-        return;
-      }
-
-      FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);
-
-      Path targetDirPath =
-          new Path(BackupUtils.getTableBackupDir(backupInfo.getBackupRootDir(),
-            backupInfo.getBackupId(), table));
-      if (outputFs.delete(targetDirPath, true)) {
-        LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
-      } else {
-        LOG.info("No data has been found in " + targetDirPath.toString() + ".");
-      }
-
-    } catch (IOException e1) {
-      LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " for table " + table
-          + "at " + backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + ".");
-      throw e1;
-    }
-  }
-
-  private boolean isLastBackupSession(BackupSystemTable table, TableName tn, long startTime)
-      throws IOException {
-    List<BackupInfo> history = table.getBackupHistory();
-    for (BackupInfo info : history) {
-      List<TableName> tables = info.getTableNames();
-      if (!tables.contains(tn)) {
-        continue;
-      }
-      if (info.getStartTs() <= startTime) {
-        return true;
-      } else {
-        return false;
-      }
-    }
-    return false;
-  }
-
-  @Override
-  public List<BackupInfo> getHistory(int n) throws IOException {
-    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
-      List<BackupInfo> history = table.getBackupHistory();
-      if (history.size() <= n) return history;
-      List<BackupInfo> list = new ArrayList<BackupInfo>();
-      for (int i = 0; i < n; i++) {
-        list.add(history.get(i));
-      }
-      return list;
-    }
-  }
-
-  @Override
-  public List<BackupInfo> getHistory(int n, BackupInfo.Filter... filters) throws IOException {
-    if (filters.length == 0) return getHistory(n);
-    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
-      List<BackupInfo> history = table.getBackupHistory();
-      List<BackupInfo> result = new ArrayList<BackupInfo>();
-      for (BackupInfo bi : history) {
-        if (result.size() == n) break;
-        boolean passed = true;
-        for (int i = 0; i < filters.length; i++) {
-          if (!filters[i].apply(bi)) {
-            passed = false;
-            break;
-          }
-        }
-        if (passed) {
-          result.add(bi);
-        }
-      }
-      return result;
-    }
-  }
-
-  @Override
-  public List<BackupSet> listBackupSets() throws IOException {
-    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
-      List<String> list = table.listBackupSets();
-      List<BackupSet> bslist = new ArrayList<BackupSet>();
-      for (String s : list) {
-        List<TableName> tables = table.describeBackupSet(s);
-        if (tables != null) {
-          bslist.add(new BackupSet(s, tables));
-        }
-      }
-      return bslist;
-    }
-  }
-
-  @Override
-  public BackupSet getBackupSet(String name) throws IOException {
-    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
-      List<TableName> list = table.describeBackupSet(name);
-      if (list == null) return null;
-      return new BackupSet(name, list);
-    }
-  }
-
-  @Override
-  public boolean deleteBackupSet(String name) throws IOException {
-    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
-      if (table.describeBackupSet(name) == null) {
-        return false;
-      }
-      table.deleteBackupSet(name);
-      return true;
-    }
-  }
-
-  @Override
-  public void addToBackupSet(String name, TableName[] tables) throws IOException {
-    String[] tableNames = new String[tables.length];
-    try (final BackupSystemTable table = new BackupSystemTable(conn);
-        final Admin admin = conn.getAdmin();) {
-      for (int i = 0; i < tables.length; i++) {
-        tableNames[i] = tables[i].getNameAsString();
-        if (!admin.tableExists(TableName.valueOf(tableNames[i]))) {
-          throw new IOException("Cannot add " + tableNames[i] + " because it doesn't exist");
-        }
-      }
-      table.addToBackupSet(name, tableNames);
-      LOG.info("Added tables [" + StringUtils.join(tableNames, " ") + "] to '" + name
-          + "' backup set");
-    }
-  }
-
-  @Override
-  public void removeFromBackupSet(String name, TableName[] tables) throws IOException {
-    LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "'");
-    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
-      table.removeFromBackupSet(name, toStringArray(tables));
-      LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name
-          + "' completed.");
-    }
-  }
-
-  private String[] toStringArray(TableName[] list) {
-    String[] arr = new String[list.length];
-    for (int i = 0; i < list.length; i++) {
-      arr[i] = list[i].toString();
-    }
-    return arr;
-  }
-
-  @Override
-  public void restore(RestoreRequest request) throws IOException {
-    if (request.isCheck()) {
-      HashMap<TableName, BackupManifest> backupManifestMap = new HashMap<>();
-      // check and load backup image manifest for the tables
-      Path rootPath = new Path(request.getBackupRootDir());
-      String backupId = request.getBackupId();
-      TableName[] sTableArray = request.getFromTables();
-      HBackupFileSystem.checkImageManifestExist(backupManifestMap, sTableArray,
-        conn.getConfiguration(), rootPath, backupId);
-
-      // Check and validate the backup image and its dependencies
-
-      if (BackupUtils.validate(backupManifestMap, conn.getConfiguration())) {
-        LOG.info(CHECK_OK);
-      } else {
-        LOG.error(CHECK_FAILED);
-      }
-      return;
-    }
-    // Execute restore request
-    new RestoreTablesClient(conn, request).execute();
-  }
-
-  @Override
-  public String backupTables(BackupRequest request) throws IOException {
-    BackupType type = request.getBackupType();
-    String targetRootDir = request.getTargetRootDir();
-    List<TableName> tableList = request.getTableList();
-
-    String backupId = BackupRestoreConstants.BACKUPID_PREFIX + EnvironmentEdgeManager.currentTime();
-    if (type == BackupType.INCREMENTAL) {
-      Set<TableName> incrTableSet = null;
-      try (BackupSystemTable table = new BackupSystemTable(conn)) {
-        incrTableSet = table.getIncrementalBackupTableSet(targetRootDir);
-      }
-
-      if (incrTableSet.isEmpty()) {
-        String msg =
-            "Incremental backup table set contains no tables. "
-                + "You need to run full backup first "
-                + (tableList != null ? "on " + StringUtils.join(tableList, ",") : "");
-
-        throw new IOException(msg);
-      }
-      if (tableList != null) {
-        tableList.removeAll(incrTableSet);
-        if (!tableList.isEmpty()) {
-          String extraTables = StringUtils.join(tableList, ",");
-          String msg =
-              "Some tables (" + extraTables + ") haven't gone through full backup. "
-                  + "Perform full backup on " + extraTables + " first, " + "then retry the command";
-          throw new IOException(msg);
-        }
-      }
-      tableList = Lists.newArrayList(incrTableSet);
-    }
-    if (tableList != null && !tableList.isEmpty()) {
-      for (TableName table : tableList) {
-        String targetTableBackupDir =
-            HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table);
-        Path targetTableBackupDirPath = new Path(targetTableBackupDir);
-        FileSystem outputFs =
-            FileSystem.get(targetTableBackupDirPath.toUri(), conn.getConfiguration());
-        if (outputFs.exists(targetTableBackupDirPath)) {
-          throw new IOException("Target backup directory " + targetTableBackupDir
-              + " exists already.");
-        }
-      }
-      ArrayList<TableName> nonExistingTableList = null;
-      try (Admin admin = conn.getAdmin();) {
-        for (TableName tableName : tableList) {
-          if (!admin.tableExists(tableName)) {
-            if (nonExistingTableList == null) {
-              nonExistingTableList = new ArrayList<>();
-            }
-            nonExistingTableList.add(tableName);
-          }
-        }
-      }
-      if (nonExistingTableList != null) {
-        if (type == BackupType.INCREMENTAL) {
-          // Update incremental backup set
-          tableList = excludeNonExistingTables(tableList, nonExistingTableList);
-        } else {
-          // Throw exception only in full mode - we tried to back up a non-existing table
-          throw new IOException("Non-existing tables found in the table list: "
-              + nonExistingTableList);
-        }
-      }
-    }
-
-    // update table list
-    BackupRequest.Builder builder = new BackupRequest.Builder();
-    request =
-        builder.withBackupType(request.getBackupType()).withTableList(tableList)
-            .withTargetRootDir(request.getTargetRootDir())
-            .withBackupSetName(request.getBackupSetName()).withTotalTasks(request.getTotalTasks())
-            .withBandwidthPerTasks((int) request.getBandwidth()).build();
-
-    TableBackupClient client = null;
-    try {
-      client = BackupClientFactory.create(conn, backupId, request);
-    } catch (IOException e) {
-      LOG.error("There is an active session already running");
-      throw e;
-    }
-
-    client.execute();
-
-    return backupId;
-  }
-
-  private List<TableName> excludeNonExistingTables(List<TableName> tableList,
-      List<TableName> nonExistingTableList) {
-
-    for (TableName table : nonExistingTableList) {
-      tableList.remove(table);
-    }
-    return tableList;
-  }
-
-  @Override
-  public void mergeBackups(String[] backupIds) throws IOException {
-    try (final BackupSystemTable sysTable = new BackupSystemTable(conn);) {
-      checkIfValidForMerge(backupIds, sysTable);
-      BackupMergeJob job = BackupRestoreFactory.getBackupMergeJob(conn.getConfiguration());
-      job.run(backupIds);
-    }
-  }
-
-  /**
-   * Verifies that backup images are valid for merge.
-   *
-   * <ul>
-   * <li>All backups MUST be in the same destination
-   * <li>No FULL backups are allowed - only INCREMENTAL
-   * <li>All backups must be in COMPLETE state
-   * <li>No holes in backup list are allowed
-   * </ul>
-   * <p>
-   * @param backupIds list of backup ids
-   * @param table backup system table
-   * @throws IOException
-   */
-  private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table) throws IOException {
-    String backupRoot = null;
-
-    final Set<TableName> allTables = new HashSet<TableName>();
-    final Set<String> allBackups = new HashSet<String>();
-    long minTime = Long.MAX_VALUE, maxTime = Long.MIN_VALUE;
-    for (String backupId : backupIds) {
-      BackupInfo bInfo = table.readBackupInfo(backupId);
-      if (bInfo == null) {
-        String msg = "Backup session " + backupId + " not found";
-        throw new IOException(msg);
-      }
-      if (backupRoot == null) {
-        backupRoot = bInfo.getBackupRootDir();
-      } else if (!bInfo.getBackupRootDir().equals(backupRoot)) {
-        throw new IOException("Found different backup destinations in a list of a backup sessions \n"
-            + "1. " + backupRoot + "\n" + "2. " + bInfo.getBackupRootDir());
-      }
-      if (bInfo.getType() == BackupType.FULL) {
-        throw new IOException("FULL backup image can not be merged for: \n" + bInfo);
-      }
-
-      if (bInfo.getState() != BackupState.COMPLETE) {
-        throw new IOException("Backup image " + backupId
-            + " can not be merged becuase of its state: " + bInfo.getState());
-      }
-      allBackups.add(backupId);
-      allTables.addAll(bInfo.getTableNames());
-      long time = bInfo.getStartTs();
-      if (time < minTime) {
-        minTime = time;
-      }
-      if (time > maxTime) {
-        maxTime = time;
-      }
-    }
-
-
-    final long startRangeTime = minTime;
-    final long endRangeTime = maxTime;
-    final String backupDest = backupRoot;
-    // Check we have no 'holes' in backup id list
-    // Filter 1 : backupRoot
-    // Filter 2 : time range filter
-    // Filter 3 : table filter
-
-    BackupInfo.Filter destinationFilter = new BackupInfo.Filter() {
-
-      @Override
-      public boolean apply(BackupInfo info) {
-        return info.getBackupRootDir().equals(backupDest);
-      }
-    };
-
-    BackupInfo.Filter timeRangeFilter = new BackupInfo.Filter() {
-
-      @Override
-      public boolean apply(BackupInfo info) {
-        long time = info.getStartTs();
-        return time >= startRangeTime && time <= endRangeTime;
-      }
-    };
-
-    BackupInfo.Filter tableFilter = new BackupInfo.Filter() {
-
-      @Override
-      public boolean apply(BackupInfo info) {
-        List<TableName> tables = info.getTableNames();
-        return !Collections.disjoint(allTables, tables);
-      }
-    };
-
-    BackupInfo.Filter typeFilter = new BackupInfo.Filter() {
-
-      @Override
-      public boolean apply(BackupInfo info) {
-        return info.getType() == BackupType.INCREMENTAL;
-      }
-    };
-
-    BackupInfo.Filter stateFilter = new BackupInfo.Filter() {
-      @Override
-      public boolean apply(BackupInfo info) {
-        return info.getState() == BackupState.COMPLETE;
-      }
-    };
-
-    List<BackupInfo> allInfos =
-        table.getBackupHistory(-1, destinationFilter,
-          timeRangeFilter, tableFilter, typeFilter, stateFilter);
-    if (allInfos.size() != allBackups.size()) {
-      // We have at least one hole in the backup image sequence
-      List<String> missingIds = new ArrayList<String>();
-      for (BackupInfo info : allInfos) {
-        if (allBackups.contains(info.getBackupId())) {
-          continue;
-        }
-        missingIds.add(info.getBackupId());
-      }
-      String errMsg =
-          "Sequence of backup ids has 'holes'. The following backup images must be added:" +
-           org.apache.hadoop.util.StringUtils.join(",", missingIds);
-      throw new IOException(errMsg);
-    }
-  }
-}
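
Tying the removed pieces together, a sketch of a programmatic restore through BackupAdminImpl,
assuming the pre-removal API; with withCheck(true), restore() only verifies the image manifests
and their dependencies, as in the method above. Paths and ids are hypothetical:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.RestoreRequest;
    import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RestoreAdminSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
            BackupAdminImpl admin = new BackupAdminImpl(conn)) {
          RestoreRequest request = new RestoreRequest.Builder()
              .withBackupRootDir("hdfs://backup.hbase.org:9000/user/biadmin/backup") // hypothetical
              .withBackupId("backup_1396650096738")                                  // hypothetical
              .withFromTables(new TableName[] { TableName.valueOf("t1") })
              .withToTables(new TableName[] { TableName.valueOf("t1") })
              .withCheck(true) // verify manifests and dependencies only
              .build();
          admin.restore(request);
        }
      }
    }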